diff --git a/spaces/0x7194633/nllb-1.3B-demo/flores200_codes.py b/spaces/0x7194633/nllb-1.3B-demo/flores200_codes.py
deleted file mode 100644
index c6a3a8e1f8f10935dd0025b4b3264ef1056ca9f2..0000000000000000000000000000000000000000
--- a/spaces/0x7194633/nllb-1.3B-demo/flores200_codes.py
+++ /dev/null
@@ -1,211 +0,0 @@
-codes_as_string = '''Acehnese (Arabic script)	ace_Arab
-Acehnese (Latin script)	ace_Latn
-Mesopotamian Arabic	acm_Arab
-Ta’izzi-Adeni Arabic	acq_Arab
-Tunisian Arabic	aeb_Arab
-Afrikaans	afr_Latn
-South Levantine Arabic	ajp_Arab
-Akan	aka_Latn
-Amharic	amh_Ethi
-North Levantine Arabic	apc_Arab
-Modern Standard Arabic	arb_Arab
-Modern Standard Arabic (Romanized)	arb_Latn
-Najdi Arabic	ars_Arab
-Moroccan Arabic	ary_Arab
-Egyptian Arabic	arz_Arab
-Assamese	asm_Beng
-Asturian	ast_Latn
-Awadhi	awa_Deva
-Central Aymara	ayr_Latn
-South Azerbaijani	azb_Arab
-North Azerbaijani	azj_Latn
-Bashkir	bak_Cyrl
-Bambara	bam_Latn
-Balinese	ban_Latn
-Belarusian	bel_Cyrl
-Bemba	bem_Latn
-Bengali	ben_Beng
-Bhojpuri	bho_Deva
-Banjar (Arabic script)	bjn_Arab
-Banjar (Latin script)	bjn_Latn
-Standard Tibetan	bod_Tibt
-Bosnian	bos_Latn
-Buginese	bug_Latn
-Bulgarian	bul_Cyrl
-Catalan	cat_Latn
-Cebuano	ceb_Latn
-Czech	ces_Latn
-Chokwe	cjk_Latn
-Central Kurdish	ckb_Arab
-Crimean Tatar	crh_Latn
-Welsh	cym_Latn
-Danish	dan_Latn
-German	deu_Latn
-Southwestern Dinka	dik_Latn
-Dyula	dyu_Latn
-Dzongkha	dzo_Tibt
-Greek	ell_Grek
-English	eng_Latn
-Esperanto	epo_Latn
-Estonian	est_Latn
-Basque	eus_Latn
-Ewe	ewe_Latn
-Faroese	fao_Latn
-Fijian	fij_Latn
-Finnish	fin_Latn
-Fon	fon_Latn
-French	fra_Latn
-Friulian	fur_Latn
-Nigerian Fulfulde	fuv_Latn
-Scottish Gaelic	gla_Latn
-Irish	gle_Latn
-Galician	glg_Latn
-Guarani	grn_Latn
-Gujarati	guj_Gujr
-Haitian Creole	hat_Latn
-Hausa	hau_Latn
-Hebrew	heb_Hebr
-Hindi	hin_Deva
-Chhattisgarhi	hne_Deva
-Croatian	hrv_Latn
-Hungarian	hun_Latn
-Armenian	hye_Armn
-Igbo	ibo_Latn
-Ilocano	ilo_Latn
-Indonesian	ind_Latn
-Icelandic	isl_Latn
-Italian	ita_Latn
-Javanese	jav_Latn
-Japanese	jpn_Jpan
-Kabyle	kab_Latn
-Jingpho	kac_Latn
-Kamba	kam_Latn
-Kannada	kan_Knda
-Kashmiri (Arabic script)	kas_Arab
-Kashmiri (Devanagari script)	kas_Deva
-Georgian	kat_Geor
-Central Kanuri (Arabic script)	knc_Arab
-Central Kanuri (Latin script)	knc_Latn
-Kazakh	kaz_Cyrl
-Kabiyè	kbp_Latn
-Kabuverdianu	kea_Latn
-Khmer	khm_Khmr
-Kikuyu	kik_Latn
-Kinyarwanda	kin_Latn
-Kyrgyz	kir_Cyrl
-Kimbundu	kmb_Latn
-Northern Kurdish	kmr_Latn
-Kikongo	kon_Latn
-Korean	kor_Hang
-Lao	lao_Laoo
-Ligurian	lij_Latn
-Limburgish	lim_Latn
-Lingala	lin_Latn
-Lithuanian	lit_Latn
-Lombard	lmo_Latn
-Latgalian	ltg_Latn
-Luxembourgish	ltz_Latn
-Luba-Kasai	lua_Latn
-Ganda	lug_Latn
-Luo	luo_Latn
-Mizo	lus_Latn
-Standard Latvian	lvs_Latn
-Magahi	mag_Deva
-Maithili	mai_Deva
-Malayalam	mal_Mlym
-Marathi	mar_Deva
-Minangkabau (Arabic script)	min_Arab
-Minangkabau (Latin script)	min_Latn
-Macedonian	mkd_Cyrl
-Plateau Malagasy	plt_Latn
-Maltese	mlt_Latn
-Meitei (Bengali script)	mni_Beng
-Halh Mongolian	khk_Cyrl
-Mossi	mos_Latn
-Maori	mri_Latn
-Burmese	mya_Mymr
-Dutch	nld_Latn
-Norwegian Nynorsk	nno_Latn
-Norwegian Bokmål	nob_Latn
-Nepali	npi_Deva
-Northern Sotho	nso_Latn
-Nuer	nus_Latn
-Nyanja	nya_Latn
-Occitan	oci_Latn
-West Central Oromo	gaz_Latn
-Odia	ory_Orya
-Pangasinan	pag_Latn
-Eastern Panjabi	pan_Guru
-Papiamento	pap_Latn
-Western Persian	pes_Arab
-Polish	pol_Latn
-Portuguese	por_Latn
-Dari	prs_Arab
-Southern Pashto	pbt_Arab
-Ayacucho Quechua	quy_Latn
-Romanian	ron_Latn
-Rundi	run_Latn
-Russian	rus_Cyrl
-Sango	sag_Latn
-Sanskrit	san_Deva
-Santali	sat_Olck
-Sicilian	scn_Latn
-Shan	shn_Mymr
-Sinhala	sin_Sinh
-Slovak	slk_Latn
-Slovenian	slv_Latn
-Samoan	smo_Latn
-Shona	sna_Latn
-Sindhi	snd_Arab
-Somali	som_Latn
-Southern Sotho	sot_Latn
-Spanish	spa_Latn
-Tosk Albanian	als_Latn
-Sardinian	srd_Latn
-Serbian	srp_Cyrl
-Swati	ssw_Latn
-Sundanese	sun_Latn
-Swedish	swe_Latn
-Swahili	swh_Latn
-Silesian	szl_Latn
-Tamil	tam_Taml
-Tatar	tat_Cyrl
-Telugu	tel_Telu
-Tajik	tgk_Cyrl
-Tagalog	tgl_Latn
-Thai	tha_Thai
-Tigrinya	tir_Ethi
-Tamasheq (Latin script)	taq_Latn
-Tamasheq (Tifinagh script)	taq_Tfng
-Tok Pisin	tpi_Latn
-Tswana	tsn_Latn
-Tsonga	tso_Latn
-Turkmen	tuk_Latn
-Tumbuka	tum_Latn
-Turkish	tur_Latn
-Twi	twi_Latn
-Central Atlas Tamazight	tzm_Tfng
-Uyghur	uig_Arab
-Ukrainian	ukr_Cyrl
-Umbundu	umb_Latn
-Urdu	urd_Arab
-Northern Uzbek	uzn_Latn
-Venetian	vec_Latn
-Vietnamese	vie_Latn
-Waray	war_Latn
-Wolof	wol_Latn
-Xhosa	xho_Latn
-Eastern Yiddish	ydd_Hebr
-Yoruba	yor_Latn
-Yue Chinese	yue_Hant
-Chinese (Simplified)	zho_Hans
-Chinese (Traditional)	zho_Hant
-Standard Malay	zsm_Latn
-Zulu	zul_Latn'''
-
-codes_as_string = codes_as_string.split('\n')
-
-flores_codes = {}
-for code in codes_as_string:
-    lang, lang_code = code.split('\t')
-    flores_codes[lang] = lang_code
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AnyTrans Crack 7.0.5 With Activation Code (32 bit 64 bit) Updated How to Get the Full Version for Free.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AnyTrans Crack 7.0.5 With Activation Code (32 bit 64 bit) Updated How to Get the Full Version for Free.md
deleted file mode 100644
index 1d2191fe613c299cfdd90e837144f1b7573b2a56..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/AnyTrans Crack 7.0.5 With Activation Code (32 bit 64 bit) Updated How to Get the Full Version for Free.md
+++ /dev/null
@@ -1,142 +0,0 @@

AnyTrans Crack 7.0.5 With Activation Code (32 bit, 64 bit) Updated

-

If you are looking for a way to transfer, manage, and back up your iOS data without any restrictions, you might be interested in AnyTrans Crack 7.0.5. This is a cracked version of AnyTrans, a popular software that allows you to sync your iPhone, iPad, iPod, iTunes, and iCloud content with your computer or other devices.

-

AnyTrans Crack 7.0.5 With Activation Code (32 bit, 64 bit) {Updated}


DOWNLOAD > https://byltly.com/2uKx93



-

In this article, we will tell you everything you need to know about AnyTrans Crack 7.0.5, including what it is, why you need it, how to download and install it, how to use it, and what are its pros and cons.

-

By the end of this article, you will be able to decide whether AnyTrans Crack 7.0.5 is worth trying or not.

-

What is AnyTrans?

-

AnyTrans is an all-in-one manager for your iOS data and files. It lets you transfer, manage, and back up your photos, music, videos, messages, contacts, notes, bookmarks, apps, and more across your iPhone, iPad, iPod, computer, iTunes, and iCloud.

-

Some of the features of AnyTrans are:

These include transferring photos, music, videos, messages, contacts, notes, bookmarks, and apps across your devices; backing up and restoring your device with Backup Manager; managing device settings and iCloud content; and extra tools such as Media Downloader, Ringtone Maker, and Screen Mirroring.

-

You can download the official version of AnyTrans from its website. However, it is not free. You need to pay $39.99 for a single license or $59.99 for a family license.

-

Why do you need AnyTrans Crack 7.0.5?

-

If you don't want to pay for the official version of AnyTrans, you might want to try AnyTrans Crack 7.0.5 instead.

-

This is a cracked version of AnyTrans that bypasses the activation code requirement and lets you use all the features of AnyTrans for free.

-

Some of the benefits of using AnyTrans Crack 7.0.5 are:

You can use all the features of AnyTrans without paying for a license or entering an activation code, and the cracked version can update to the latest version without any problems.

-

How to download AnyTrans Crack 7.0.5 with activation code for free
-AnyTrans Crack 7.0.5 full version with license key download link
-AnyTrans Crack 7.0.5 latest version for Windows 10/8/7 (32 bit, 64 bit)
-AnyTrans Crack 7.0.5 review: pros and cons of using the software
-AnyTrans Crack 7.0.5 features: what can you do with it
-AnyTrans Crack 7.0.5 tutorial: how to install and use the software
-AnyTrans Crack 7.0.5 alternatives: other software that can transfer data between devices
-AnyTrans Crack 7.0.5 problems: how to fix common issues and errors
-AnyTrans Crack 7.0.5 vs official version: what are the differences and risks
-AnyTrans Crack 7.0.5 for Mac: is there a compatible version for macOS
-AnyTrans Crack 7.0.5 for Android: how to transfer data from Android to PC or other devices
-AnyTrans Crack 7.0.5 for iPhone: how to transfer data from iPhone to PC or other devices
-AnyTrans Crack 7.0.5 for iPad: how to transfer data from iPad to PC or other devices
-AnyTrans Crack 7.0.5 for iPod: how to transfer data from iPod to PC or other devices
-AnyTrans Crack 7.0.5 for iTunes: how to sync data with iTunes without erasing
-AnyTrans Crack 7.0.5 for iCloud: how to access and manage iCloud data on PC
-AnyTrans Crack 7.0.5 for WhatsApp: how to backup and restore WhatsApp messages and attachments
-AnyTrans Crack 7.0.5 for LINE: how to backup and restore LINE chats and stickers
-AnyTrans Crack 7.0.5 for Viber: how to backup and restore Viber conversations and media files
-AnyTrans Crack 7.0.5 for Kik: how to backup and restore Kik messages and photos
-AnyTrans Crack 7.0.5 for photos: how to transfer and organize photos across devices
-AnyTrans Crack 7.0.5 for videos: how to transfer and convert videos for different devices
-AnyTrans Crack 7.0.5 for music: how to transfer and manage music files and playlists
-AnyTrans Crack 7.0.5 for contacts: how to transfer and edit contacts on PC
-AnyTrans Crack 7.0.5 for messages: how to transfer and print text messages on PC
-AnyTrans Crack 7.0.5 for apps: how to transfer and backup apps and app data
-AnyTrans Crack 7.0.5 for books: how to transfer and read ebooks on PC
-AnyTrans Crack 7.0.5 for podcasts: how to transfer and listen to podcasts on PC
-AnyTrans Crack 7.0.5 for voice memos: how to transfer and play voice memos on PC
-AnyTrans Crack 7.0.5 for ringtones: how to create and transfer ringtones for iPhone
-AnyTrans Crack 7.0.5 for calendars: how to transfer and sync calendars across devices
-AnyTrans Crack 7.0.5 for notes: how to transfer and edit notes on PC
-AnyTrans Crack 7.0.5 for reminders: how to transfer and manage reminders on PC
-AnyTrans Crack 7.0.5 for Safari history: how to transfer and view Safari history on PC
-AnyTrans Crack 7.0.5 for call history: how to transfer and check call history on PC
-AnyTrans Crack 7.0

- -

How to download and install AnyTrans Crack 7.0.5?

-

If you want to download and install AnyTrans Crack 7.0.5 on your computer, you can follow these steps:

-
  1. Click on this link to download the file named "AnyTrans-Crack-705-With-Activation-Code-32-bit-64-bit-Updated.pdf".
  2. Open the file with a PDF reader and follow the instructions inside.
  3. You will need to download two files: "AnyTrans_Setup.exe" and "Anytrans_Crack.zip".
  4. Run "AnyTrans_Setup.exe" and install AnyTrans on your computer.
  5. Extract "Anytrans_Crack.zip" and copy the file named "Anytrans.exe" to the installation folder of AnyTrans (usually C:\Program Files (x86)\iMobie\AnyTrans).
  6. Replace the original file with the cracked file.
  7. Launch AnyTrans from your desktop or start menu.
  8. You will see a message saying "Activation Successful".
  9. Congratulations! You have successfully installed AnyTrans Crack 7.0.5 on your computer.

Here are some screenshots to help you with the installation process:

-
[Screenshots: Step 1 through Step 8 of the installation process]
-

How to use AnyTrans Crack 7.0.5?

-

Once you have installed AnyTrans Crack 7.0.5 on your computer, you can start using it to transfer, manage, and back up your iOS data with ease.

-

Here is a brief tutorial on how to use AnyTrans Crack 7.0.5:

-
  1. Connect your iOS device to your computer via USB cable or Wi-Fi.
  2. Select your device from the top left corner of the AnyTrans interface.
  3. You will see different categories of data on your device such as Photos, Music, Videos, Messages, etc.
  4. Select the category you want to transfer or manage from the left sidebar.
  5. You will see different options such as Add Content (to add files from computer or other devices), Export Content (to export files to computer or other devices), Delete Content (to delete files from device), etc.
  6. Select the option you want and follow the instructions on the screen.
  7. You can also use other features of AnyTrans such as Backup Manager (to back up or restore your device), Device Manager (to manage device settings), iCloud Manager (to manage iCloud content), Media Downloader (to download videos from web), Ringtone Maker (to create custom ringtones), Screen Mirroring (to mirror/record/capture device screen), etc.

Here are some screenshots to help you with using AnyTrans Crack 7.0.5:

-

What are the pros and cons of AnyTrans Crack 7.0.5?

-

As with any software, AnyTrans Crack 7.0.5 has its pros and cons. Here are some of them:

-

Pros

It is free, it unlocks all the features of the official version of AnyTrans, and it can update to the latest version without an activation code.

-

Cons

It is illegal to use, it may contain viruses, malware, or spyware that can harm your computer or device, it may cause errors or crashes, and it comes with no official updates, support, or customer service.

-

Conclusion

-

In conclusion, AnyTrans Crack 7.0.5 is a cracked version of AnyTrans that lets you transfer, manage, and back up your iOS data for free. It has all the features of the official version of AnyTrans and can update to the latest version without any problems. However, it is not legal to use and may pose some risks to your computer or device.

-

If you want to try AnyTrans Crack 7.0.5, you can download it from this link and follow the instructions in this article. However, we recommend that you use the official version of AnyTrans instead, as it is safer, more reliable, and more ethical. You can download the official version of AnyTrans from its website and enjoy a better iPhone life with the best iPhone manager.

-

We hope this article was helpful for you. If you have any questions or feedback, please let us know in the comments below.

-

FAQs

-

Q: Is AnyTrans Crack 7.0.5 safe to use?

-

A: AnyTrans Crack 7.0.5 is not safe to use as it may contain viruses, malware, or spyware that can harm your computer or device. It may also cause errors or crashes on your computer or device. It is better to use the official version of AnyTrans instead, as it is 100% clean and secure.

-

Q: Is AnyTrans Crack 7.0.5 legal to use?

-

A: AnyTrans Crack 7.0.5 is not legal to use as it violates the copyright of iMobie Inc., the developer of AnyTrans. It is also against the terms and conditions of AnyTrans software. It is better to use the official version of AnyTrans instead, as it respects the rights and interests of iMobie Inc.

-

Q: How can I get an activation code for AnyTrans?

-

A: You can get an activation code for AnyTrans by purchasing a license from its website. You can choose between a single license ($39.99) or a family license ($59.99). You will receive an email with your activation code after completing your payment. You can then enter your activation code in AnyTrans software to activate it.

-

Q: What are the system requirements for AnyTrans?

-

A: The system requirements for AnyTrans are:

- -

Q: How can I contact iMobie Inc. for support?

-

A: You can contact iMobie Inc. for support by visiting their website and clicking on "Support" at the top right corner. You can also email them at support@imobie.com or call them at +1-844-245-8772 (US & Canada) or +86-28-85131438 (International).

-

-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/BitCoin Generator V1.2.zipl A Powerful and Reliable Bitcoin Generator that Works on Any Device.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/BitCoin Generator V1.2.zipl A Powerful and Reliable Bitcoin Generator that Works on Any Device.md
deleted file mode 100644
index 0b51268fad694336eabc5b2819215cc1ebe8eda3..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/BitCoin Generator V1.2.zipl A Powerful and Reliable Bitcoin Generator that Works on Any Device.md
+++ /dev/null
@@ -1,116 +0,0 @@

BitCoin Generator V1.2.zip: A Scam or a Miracle?

-

BitCoin is one of the most popular and valuable cryptocurrencies in the world. It has attracted millions of users who want to invest, trade, or spend it online. However, it is also a scarce and limited resource, with only 21 million coins that can ever be created. This means that many people are looking for ways to get more BitCoins without spending too much money or time.

-

One of the methods that some people claim to offer is BitCoin Generator V1.2.zip. This is a software program that supposedly can generate free BitCoins for you in a matter of minutes. But is it really possible to create BitCoins out of thin air? Or is it just a scam that will steal your money or infect your computer with malware? In this article, we will explore what BitCoin Generator V1.2.zip is, how it claims to work, what are the risks and drawbacks, how to spot a fake BitCoin generator, and what are some legitimate ways to earn BitCoins.

-

BitCoin Generator V1.2.zipl


Download https://byltly.com/2uKxZq



-

What is BitCoin Generator V1.2.zip?

-

BitCoin Generator V1.2.zip is a file that you can download from various websites that claim to offer a free BitCoin generator software. The file name may vary slightly, but it usually contains the words "BitCoin", "Generator", and a version number. The file size is usually around 10 MB.

-

The websites that promote BitCoin Generator V1.2.zip usually have flashy and convincing graphics, testimonials, and guarantees. They promise that you can generate up to 5 BitCoins per day with just a few clicks. They also claim that the software is safe, secure, and anonymous, and that you don't need any technical skills or knowledge to use it.

-

How does it claim to work?

-

The websites that offer BitCoin Generator V1.2.zip usually provide some vague and dubious explanations of how the software works. Some of them say that it exploits a loophole or a bug in the BitCoin network or protocol. Others say that it uses advanced algorithms or artificial intelligence to predict the next blocks or transactions in the blockchain. And others say that it simply connects to a secret pool of BitCoins that are hidden or unused.

-

Whatever the explanation, the websites claim that all you need to do is download the file, run it on your computer, enter your BitCoin address, choose the amount of BitCoins you want to generate, and click on a button. Then, you just have to wait for a few minutes until the software confirms your transaction and sends you the free BitCoins.

-

How to use BitCoin Generator V1.2.zipl
-BitCoin Generator V1.2.zipl download link
-BitCoin Generator V1.2.zipl review and feedback
-BitCoin Generator V1.2.zipl scam or legit
-BitCoin Generator V1.2.zipl tutorial and guide
-BitCoin Generator V1.2.zipl free trial and license
-BitCoin Generator V1.2.zipl features and benefits
-BitCoin Generator V1.2.zipl system requirements and compatibility
-BitCoin Generator V1.2.zipl virus and malware check
-BitCoin Generator V1.2.zipl customer support and contact
-BitCoin Generator V1.2.zipl alternatives and competitors
-BitCoin Generator V1.2.zipl updates and patches
-BitCoin Generator V1.2.zipl testimonials and success stories
-BitCoin Generator V1.2.zipl refund policy and guarantee
-BitCoin Generator V1.2.zipl pros and cons
-BitCoin Generator V1.2.zipl best practices and tips
-BitCoin Generator V1.2.zipl FAQs and answers
-BitCoin Generator V1.2.zipl bonus and discount
-BitCoin Generator V1.2.zipl affiliate program and commission
-BitCoin Generator V1.2.zipl results and proof
-BitCoin Generator V1.2.zipl demo and video
-BitCoin Generator V1.2.zipl comparison and analysis
-BitCoin Generator V1.2.zipl problems and solutions
-BitCoin Generator V1.2.zipl risks and warnings
-BitCoin Generator V1.2.zipl secrets and tricks
-BitCoin Generator V1.2.zipl limitations and drawbacks
-BitCoin Generator V1.2.zipl performance and speed
-BitCoin Generator V1.2.zipl reliability and security
-BitCoin Generator V1.2.zipl quality and accuracy
-BitCoin Generator V1.2.zipl popularity and demand
-BitCoin Generator V1.2.zipl reputation and credibility
-BitCoin Generator V1.2.zipl innovation and improvement
-BitCoin Generator V1.2.zipl customization and flexibility
-BitCoin Generator V1.2.zipl convenience and usability
-BitCoin Generator V1.2.zipl simplicity and efficiency
-BitCoin Generator V1.2.zipl fun and entertainment
-BitCoin Generator V1.2.zipl value and worth
-BitCoin Generator V1.2.zipl satisfaction and happiness
-BitCoin Generator V1.2.zipl earnings and income
-BitCoin Generator V1.2.zipl savings and expenses
-BitCoin Generator V1.2.zipl investment and return
-BitCoin Generator V1.2.zipl growth and development
-BitCoin Generator V1.2.zipl learning and education
-BitCoin Generator V1.2.zipl skills and knowledge
-BitCoin Generator V1.2.zipl tools and resources
-BitCoin Generator V1.2.zipl trends and opportunities
-BitCoin Generator V1.2.zipl challenges and obstacles
-BitCoin Generator V1.2.zipl mistakes and errors
-BitCoin Generator V1.2.zipl future and vision

-

What are the risks and drawbacks?

-

If you are tempted by the idea of getting free BitCoins with BitCoin Generator V1.2.zip, you should think twice before downloading or running it on your computer. There are several risks and drawbacks associated with this software, such as:

It may contain viruses, malware, or spyware that can harm your computer or compromise your data security; it may try to steal your money or personal information; it may cause errors or crashes on your system; and because it cannot actually create BitCoins, it will waste your time and effort.

-

How to spot a fake BitCoin generator?

-

As you can see, BitCoin Generator V1.2.zip is not a reliable or trustworthy software program that can generate free BitCoins for you. In fact, there is no such thing as a free BitCoin generator at all. Any website or program that claims to offer one is either lying or trying to trick you into something malicious.

-

Therefore, you should be very careful and skeptical when you encounter any website or program that claims to offer a free BitCoin generator. Here are some tips on how to spot a fake BitCoin generator:

-

Check the source and reputation

-

The first thing you should do when you see a website or program that claims to offer a free BitCoin generator is to check its source and reputation. You should look for information about who created it, where it came from, how long it has been around, what reviews or ratings it has received from other users or experts, what security measures it has in place, etc.

-

Beware of unrealistic promises and guarantees

-

The second thing you should do when you see a website or program that claims to offer a free BitCoin generator is to beware of unrealistic promises and guarantees. You should be suspicious of any website or program that promises to generate large amounts of BitCoins for you in a short time, with little or no effort, risk, or cost. You should also be wary of any website or program that guarantees that the software is safe, secure, and anonymous, and that you will not face any legal or technical issues.

-

You should remember that BitCoin is a decentralized and transparent system that operates on a peer-to-peer network. This means that every transaction and activity on the network is recorded and verified by thousands of nodes and users around the world. Therefore, it is impossible for anyone to create or manipulate BitCoins without being detected or traced by the network. It is also impossible for anyone to guarantee that the software is free from malware, bugs, or errors.

-

Avoid downloading unknown files or clicking suspicious links

-

The third thing you should do when you see a website or program that claims to offer a free BitCoin generator is to avoid downloading unknown files or clicking suspicious links. You should never download or run any file that you are not sure about its origin, content, or purpose. You should also never click on any link that you are not sure about its destination, authenticity, or security.

-

You should always scan any file or link with a reliable antivirus or anti-malware program before opening or accessing it. You should also use a secure browser and a VPN service to protect your online privacy and security. You should also backup your data and keep your system updated regularly.

-

What are some legitimate ways to earn BitCoins?

-

Now that you know how to spot a fake BitCoin generator, you may wonder if there are any legitimate ways to earn BitCoins. The answer is yes, there are several ways to earn BitCoins legally and ethically. However, none of them are easy, fast, or free. They all require some investment of time, money, or skills. Here are some of the most common and popular ways to earn BitCoins:

-

Mining

-

Mining is the process of creating new BitCoins by solving complex mathematical problems using specialized hardware and software. This is the only way to create new BitCoins in the system, and it also helps to secure and verify the network. However, mining is very difficult, competitive, and expensive. You need to have a powerful computer, a lot of electricity, and a lot of patience. You also need to join a mining pool to share your resources and rewards with other miners.
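To give a concrete, hypothetical illustration of what "solving complex mathematical problems" means here: mining is essentially a proof-of-work search, repeatedly hashing candidate data with a changing nonce until the hash meets a difficulty target. The sketch below is a toy Python example invented for this article, not real mining software (real Bitcoin mining double-SHA-256-hashes 80-byte block headers on specialized hardware):

import hashlib

def mine(block_data: str, difficulty: int) -> int:
    # Search for a nonce whose hash starts with `difficulty` zero hex digits.
    target = "0" * difficulty
    nonce = 0
    while True:
        digest = hashlib.sha256(f"{block_data}{nonce}".encode()).hexdigest()
        if digest.startswith(target):
            return nonce
        nonce += 1

print(mine("example block", 4))  # takes a moment; each extra zero makes the search ~16x longer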

-

Trading

-

Trading is the process of buying and selling BitCoins on an exchange platform or a peer-to-peer marketplace. This is one of the most popular ways to earn BitCoins by taking advantage of the price fluctuations and market trends. However, trading is very risky, volatile, and unpredictable. You need to have a lot of knowledge, experience, and strategy. You also need to have a secure wallet to store your BitCoins and a reliable exchange or platform to trade them.

-

Faucets

-

Faucets are websites or apps that give away small amounts of BitCoins for free in exchange for completing simple tasks or watching ads. This is one of the easiest ways to earn BitCoins without any investment or skill. However, faucets are very low-paying, time-consuming, and boring. You need to have a lot of patience and endurance. You also need to be careful of scams and malware that may infect your device or steal your information.

-

Tasks and surveys

-

Tasks and surveys are micro-jobs that pay small amounts of BitCoins for completing simple activities online, such as testing apps or websites, watching videos, or answering questionnaires. Like faucets, they are low-paying and time-consuming, and you should be careful of sites that never pay out or misuse your personal information.

-

Conclusion

-

BitCoin Generator V1.2.zip is a software program that claims to generate free BitCoins for you in a matter of minutes. However, it is not a legitimate or trustworthy software program at all. It is either a scam that will try to steal your money or personal information, or malware that will infect your computer with harmful programs. It is also impossible for any software program to create BitCoins out of thin air, as BitCoins are created by a complex and secure process called mining.

-

Therefore, you should avoid downloading or running BitCoin Generator V1.2.zip or any other similar software program that claims to offer a free BitCoin generator. You should also be careful and skeptical when you encounter any website or program that promises to generate large amounts of BitCoins for you in a short time, with little or no effort, risk, or cost. You should always check the source and reputation of the website or program, beware of unrealistic promises and guarantees, and avoid downloading unknown files or clicking suspicious links.

-

If you want to earn BitCoins legitimately and ethically, you should consider some of the ways that we have discussed in this article, such as mining, trading, faucets, tasks and surveys. However, none of these ways are easy, fast, or free. They all require some investment of time, money, or skills. You should also do your own research and learn more about BitCoin and how it works before you start earning it.

-

Summary of main points

BitCoin Generator V1.2.zip claims to generate free BitCoins but is actually a scam or malware; there is no such thing as a free BitCoin generator; you should check the source and reputation of any site or program, beware of unrealistic promises and guarantees, and avoid unknown files and suspicious links; legitimate ways to earn BitCoins include mining, trading, faucets, and tasks and surveys.

-

Call to action

-

If you found this article helpful and informative, please share it with your friends and family who may be interested in learning more about BitCoin and how to earn it. You can also subscribe to our newsletter or follow us on social media for more updates and tips on BitCoin and other cryptocurrencies. Thank you for reading!

-

FAQs

-
  1. What is BitCoin?

    BitCoin is a digital currency that operates on a decentralized and peer-to-peer network. It was created in 2009 by an anonymous person or group using the name Satoshi Nakamoto. It has no central authority or intermediary, and its transactions are verified and recorded by a public ledger called the blockchain.

  2. How can I get BitCoins?

    You can get BitCoins by buying them from an exchange platform or a peer-to-peer marketplace using your fiat currency or another cryptocurrency. You can also get BitCoins by earning them from various activities such as mining, trading, faucets, and tasks and surveys.

  3. How can I store BitCoins?

    You can store BitCoins in a digital wallet that can be either online (web-based), offline (hardware-based), or mobile (app-based). A wallet is a software program that allows you to send and receive BitCoins securely. You should choose a wallet that suits your needs and preferences, always keep your private keys safe, and back up your data regularly.

  4. How can I spend BitCoins?

    You can spend BitCoins by using them as a medium of exchange for goods and services online or offline. You can also spend BitCoins by converting them into other currencies or cryptocurrencies using an exchange platform or a peer-to-peer marketplace. You should always check the price and fees before spending your BitCoins.

  5. How can I learn more about BitCoin?

    You can learn more about BitCoin by reading books, articles, blogs, and official resources such as the original Bitcoin whitepaper, or by joining online communities and forums dedicated to cryptocurrency.
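To make the "public ledger" idea from the first FAQ concrete, here is a toy, hypothetical hash-chain sketch in Python (invented for illustration; it is not the real Bitcoin protocol or data format). Each block stores the hash of the previous block, so altering any old transaction changes every later hash and is immediately detectable:

import hashlib

def block_hash(prev_hash: str, transactions: str) -> str:
    # Hash the previous block's hash together with this block's transactions.
    return hashlib.sha256(f"{prev_hash}:{transactions}".encode()).hexdigest()

chain = []
prev = "0" * 64  # genesis placeholder
for txs in ["alice->bob:1", "bob->carol:2", "carol->dave:3"]:
    prev = block_hash(prev, txs)
    chain.append((txs, prev))

for txs, h in chain:
    print(txs, h[:16])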

    -
    -
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson 1360 Resetter How to Reset Your Printer and Clear the Red Light Blinking.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson 1360 Resetter How to Reset Your Printer and Clear the Red Light Blinking.md
deleted file mode 100644
index fe2273a7da7906d892104673c42777691649992d..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson 1360 Resetter How to Reset Your Printer and Clear the Red Light Blinking.md
+++ /dev/null
@@ -1,37 +0,0 @@
    -

    How to Reset Epson 1360 Printer Using Epson 1360 Resetter

    -

    If you own an Epson 1360 printer, you may encounter some problems such as waste ink pad counter overflow, red light blinking, or printer error. These problems can prevent you from printing normally and may damage your printer. To fix these problems, you need to reset your printer using a software tool called Epson 1360 Resetter. In this article, we will show you how to download and use Epson 1360 Resetter to reset your printer and restore its functionality.

    -

    What is Epson 1360 Resetter?

    -

    Epson 1360 Resetter is a software tool that allows you to reset the waste ink pad counter of your Epson 1360 printer. The waste ink pad counter is a feature that tracks the amount of ink that is used and wasted by your printer. When the counter reaches a certain limit, the printer will stop working and display an error message. This is to prevent the waste ink from overflowing and damaging the printer. However, sometimes the counter may be inaccurate or corrupted, causing the printer to stop working prematurely or unnecessarily. By using Epson 1360 Resetter, you can reset the counter to zero and clear the error message, allowing you to print again.

    -

    epson 1360 resetter


Download https://byltly.com/2uKxjr



    -

    How to Download Epson 1360 Resetter?

    -

    To download Epson 1360 Resetter, you need to visit a reliable website that provides the software for free. One of the websites that we recommend is https://resetkey.net/download-epson-1360-resetter.html. This website offers a safe and easy way to download Epson 1360 Resetter without any viruses or malware. To download Epson 1360 Resetter from this website, follow these steps:

    -
  1. Go to https://resetkey.net/download-epson-1360-resetter.html using your web browser.
  2. Scroll down and click on the green "Download" button.
  3. Wait for a few seconds until the download link appears.
  4. Click on the download link and save the file to your computer.
  5. Extract the file using software such as WinRAR or 7-Zip.
  6. You will see a folder named "Epson 1360 Resetter" that contains the software files.

    How to Use Epson 1360 Resetter?

    -

    To use Epson 1360 Resetter, you need to connect your printer to your computer using a USB cable. Make sure that your printer is turned on and has enough ink and paper. Then, follow these steps:

    -
  1. Open the folder "Epson 1360 Resetter" and double-click on the file "AdjProg.exe".
  2. You will see a window that shows the Epson Adjustment Program.
  3. Click on "Select" and choose your printer model (Epson Stylus Photo R260) and port (Auto Selection).
  4. Click on "OK" and then click on "Particular Adjustment Mode".
  5. You will see a list of options that you can adjust using the software.
  6. Select "Waste Ink Pad Counter" and click on "OK".
  7. You will see a window that shows the current value of the waste ink pad counter.
  8. Check the box next to "Main Pad Counter" and click on "Check".
  9. The software will check the status of the main pad counter and show you the result.
  10. If the result shows that the counter has reached or exceeded its limit, you need to reset it.
  11. To reset it, click on "Initialization" and wait for a few seconds until the process is completed.
  12. The software will display a message that says "Please turn off your printer".
  13. Click on "OK" and then turn off your printer using the power button.
  14. Wait for about 10 seconds and then turn on your printer again.
  15. Your printer should be reset and ready to print again.
    -
    -
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Surpac 6.1.2 Crack 6 ((EXCLUSIVE)).md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Surpac 6.1.2 Crack 6 ((EXCLUSIVE)).md
deleted file mode 100644
index 0aad937d6678ac967c6bb7a438b721b0b105b7d4..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Surpac 6.1.2 Crack 6 ((EXCLUSIVE)).md
+++ /dev/null
@@ -1,98 +0,0 @@

    Free Download Surpac 6.1.2 Crack 6: A Powerful Software for Geology and Mine Planning

    -

    If you are looking for a powerful and easy-to-use software for geological modeling, mine design, geostatistics, mine planning, and resource estimation, you might be interested in Surpac 6.1.2 Crack 6. Surpac is the world's most popular geology and mine planning software, supporting open pit and underground operations and exploration projects in more than 120 countries. In this article, we will show you how to download and install Surpac 6.1.2 Crack 6 for free, what are its features and benefits, and how to use it effectively.

    -

    What is Surpac 6.1.2 Crack 6?

    -

    Surpac 6.1.2 Crack 6 is a cracked version of Surpac 6.1.2, which is a software developed by Dassault Systemes GEOVIA (formerly Gemcom Software). Surpac 6.1.2 was released in November 2018 and introduced the new Structural Suite module - a set of visualization and analysis tools for any kind of oriented data. Surpac 6.1.2 also included the powerful collaboration and data management capabilities of the 3DEXPERIENCE platform as well as over 30 customer-requested enhancements and product quality improvements.

    -

    Free Download Surpac 6.1.2 Crack 6


    DOWNLOAD ○○○ https://byltly.com/2uKymB



    -

A cracked version of software is a modified version that bypasses the original license or activation system of the software. This allows users to use the software without paying for it or registering it with the developer. However, using cracked software is illegal and risky, as it may contain viruses, malware, spyware, or other harmful components that can damage your computer or compromise your data security.

    -

    Why use Surpac 6.1.2 Crack 6?

    -

    Surpac 6.1.2 Crack 6 is used by some people who want to enjoy the benefits of Surpac without paying for it or going through the official installation process. Some of the benefits of Surpac are:

These include comprehensive tools for drillhole data management, geological modeling, block modeling, geostatistics, mine design, mine planning, and resource estimation, with support for both open pit and underground operations.

-

    How to download Surpac 6.1.2 Crack 6?

    -

    There are many websites that claim to offer free download links for Surpac 6.1.2 Crack 6, but most of them are fake or unreliable. Some of them may require you to complete surveys or register on their sites before giving you access to the download link. Others may redirect you to other pages or ads that have nothing to do with Surpac. And some may even infect your computer with viruses or malware that can harm your system or steal your data. Therefore, we do not recommend downloading Surpac 6.1.2 Crack 6 from any of these sources.

    -

    The only safe and legal way to download Surpac 6.1.2 is to get it from the official website of Dassault Systemes GEOVIA. There, you can request a free trial version of Surpac 6.1.2 for 30 days, which will give you access to all the features and modules of the software. You will also get technical support and customer service from the developer. To request a free trial, you need to fill out a form with your personal and professional details, such as your name, email, phone number, company name, industry, country, and reason for interest. After submitting the form, you will receive an email with a link to download Surpac 6.1.2 and instructions on how to install and activate it.

    -

    How to install Surpac 6.1.2 Crack 6?

    -

    If you decide to download Surpac 6.1.2 Crack 6 from an unofficial source, you will need to follow some steps to install it on your computer. However, we warn you again that this is not a safe or legal option, and we do not take any responsibility for any damage or loss that may occur as a result of using cracked software. Here are the steps to install Surpac 6.1.2 Crack 6:

    -
  1. Download the Surpac 6.1.2 Crack 6 file from the website of your choice.
  2. Extract the file using a program like WinRAR or 7-Zip.
  3. Run the setup.exe file and follow the installation wizard.
  4. When prompted, enter the serial number or license key that came with the file.
  5. Copy the crack file from the folder and paste it into the installation directory of Surpac 6.1.2.
  6. Run Surpac 6.1.2 as administrator and enjoy using it.

    How to use Surpac 6.1.2 Crack 6?

    -

    Surpac 6.1.2 Crack 6 has the same user interface and functionality as Surpac 6.1.2, so you can use it in the same way as the original software. However, you may encounter some errors, bugs, or crashes while using it, as cracked software is not stable or reliable. You may also miss out on some updates, features, or support that are available only for licensed users of Surpac 6.1.2.

    -

    -

    To use Surpac 6.1.2 Crack 6, you need to have some basic knowledge of geology and mine planning concepts and terminology, as well as some familiarity with Windows operating system and Microsoft Office applications. You can also refer to the online help system or user guide of Surpac 6.1.2 for more information and guidance on how to use the software. Here are some general steps on how to use Surpac 6.1.2 Crack 6:

    -
  1. Launch Surpac 6.1.2 Crack 6 from your desktop or start menu.
  2. Select a project or create a new one from the File menu.
  3. Choose a module or task from the Modules menu or toolbar.
  4. Import or create data sets such as drillholes, surfaces, solids, grids, etc.
  5. Analyze and visualize data using various tools such as graphs, maps, sections, reports, etc.
  6. Create models using interpolation, geostatistics, block modeling, etc.
  7. Design mine layouts using pit optimization, underground design, scheduling, etc.
  8. Estimate resources using cut-off grades, tonnage factors, density values, etc. (a sketch of this arithmetic follows this list).
  9. Export or print data sets or models using various formats such as DXF, CSV, PDF, etc.
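As a rough sketch of the arithmetic behind step 8's resource estimation (the block values and cut-off below are invented for illustration, not Surpac's actual implementation): tonnage is block volume times density, contained metal is tonnage times grade, and only blocks at or above the cut-off grade are counted.

# Hypothetical block-model resource estimate (illustrative values only)
blocks = [
    # (volume in m3, density in t/m3, grade in g/t)
    (1000, 2.7, 1.8),
    (1000, 2.7, 0.4),
    (1000, 3.0, 2.5),
]
cutoff = 0.5  # g/t
tonnes = sum(v * d for v, d, g in blocks if g >= cutoff)
metal_g = sum(v * d * g for v, d, g in blocks if g >= cutoff)
print(f"{tonnes:.0f} t above cut-off, {metal_g:.0f} g contained metal")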

    A table showing some common modules and tasks in Surpac 6.1.2 Crack 6

Module               Task
Data                 Data management
Geology              Geological modeling
Evaluation           Resource estimation
Pit Design           Pit optimization and design
Underground Design   Underground mine design
Scheduling           Mine production scheduling
Structural Suite     Oriented data analysis
    -

    What are the risks and disadvantages of using Surpac 6.1.2 Crack 6?

    -

    While Surpac 6.1.2 Crack 6 may seem like a tempting option for some people who want to save money or time, it also comes with many risks and disadvantages that outweigh its benefits. Some of the risks and disadvantages of using Surpac 6.1.2 Crack 6 are:
These include: legal problems, because using and distributing cracked software violates copyright law; computer issues, because the crack may carry viruses, malware, or spyware; performance issues, such as errors, bugs, or crashes; support issues, because you receive no updates, patches, or customer service; and reputation issues if you rely on pirated software for professional work.

    - -

    What are the alternatives to using Surpac 6.1.2 Crack 6?

    -

    If you want to use Surpac 6.1.2 without risking the negative consequences of using cracked software, you have some alternatives that are legal and safe. Some of the alternatives to using Surpac 6.1.2 Crack 6 are:
These include requesting a free 30-day trial of Surpac 6.1.2 from the official website of Dassault Systemes GEOVIA, buying a licensed version with a valid license or activation code, or using free or open source software for geology and mine planning that offers similar functionality.

    - -

    Conclusion

    -

    Surpac 6.1.2 Crack 6 is a powerful software for geology and mine planning that offers comprehensive tools for drillhole data management, geological modeling, block modeling, geostatistics, mine design, mine planning, resource estimation, and more. However, using Surpac 6.1.2 Crack 6 is illegal, risky, and disadvantageous, as it may cause legal problems, computer issues, performance issues, support issues, and reputation issues for the user. Therefore, we do not recommend using Surpac 6.1.2 Crack 6 at all.

    -

    The best way to use Surpac 6.1.2 is to get it from the official website of Dassault Systemes GEOVIA and request a free trial version for 30 days or buy a licensed version with a valid license or activation code. This will ensure that you use the software legally, safely, reliably, effectively, efficiently, and professionally. Alternatively, you can use other free or open source software for geology and mine planning that have similar functionality as Surpac 6.1.2.

    -

    FAQs

    -

    What is the difference between Surpac 6.1.2 and Surpac 6.1.2 Crack 6?

    -

    Surpac 6.1.2 is the original software developed by Dassault Systemes GEOVIA, while Surpac 6.1.2 Crack 6 is a modified version that bypasses the license or activation system of the software. Surpac 6.1.2 is legal, safe, reliable, effective, efficient, and professional, while Surpac 6.1.2 Crack 6 is illegal, risky, unreliable, ineffective, inefficient, and unprofessional.

    -

    How much does Surpac 6.1.2 cost?

    -

    The price of Surpac 6.1.2 depends on the number of modules, users, and licenses that you need for your project or organization. You can contact Dassault Systemes GEOVIA or an authorized reseller to get a quote for Surpac 6.1.2.

    -

    What are the system requirements for Surpac 6.1.2?

    -

    The minimum system requirements for Surpac 6.1.2 are:

    - -

    How can I learn more about Surpac 6.1.2?

    -

    You can learn more about Surpac 6.1.2 by visiting the official website of Dassault Systemes GEOVIA, where you can find product information, features, modules, videos, tutorials, case studies, testimonials, and more. You can also join the Surpac community forum, where you can ask questions, share tips, and interact with other users and experts.

    -

    Where can I get help or support for Surpac 6.1.2?

    -

    If you are a licensed user of Surpac 6.1.2, you can get help or support from Dassault Systemes GEOVIA by contacting their customer service team via phone, email, or online chat. You can also access their online help system or user guide for more information and guidance on how to use the software.

    -

    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Comenius Logo Magyar Letoltes Win7 64 Bit Empires Anneaux Secr.md b/spaces/1gistliPinn/ChatGPT4/Examples/Comenius Logo Magyar Letoltes Win7 64 Bit Empires Anneaux Secr.md
deleted file mode 100644
index ef43d10852eb676d42a53461317c8713b46bf8d5..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Comenius Logo Magyar Letoltes Win7 64 Bit Empires Anneaux Secr.md
+++ /dev/null
@@ -1,6 +0,0 @@

    Comenius Logo Magyar Letoltes Win7 64 Bit empires anneaux secr


    DOWNLOAD →→→ https://imgfil.com/2uy0De



- -Read or download Furyou ni Hamerarete Jusei suru Kyonyuu Okaasan ... Comenius Logo Magyar Letoltes Win7 64 Bit empires anneaux secr
    -
    -
    -

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Dead Island Riptide Lan Crack 11 TOP.md b/spaces/1gistliPinn/ChatGPT4/Examples/Dead Island Riptide Lan Crack 11 TOP.md
deleted file mode 100644
index f0b20344ce178599d869f760eeab00dc0e0678cb..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Dead Island Riptide Lan Crack 11 TOP.md
+++ /dev/null
@@ -1,6 +0,0 @@
    -

In fairness, the story is by no means terrible, and it's a surprisingly emotional tale of how even the tiniest of moments can change a person. There is the common Dead Island issue of using disposable characters as the protagonists, which serves as a little reminder that even though you're ultimately invincible, as the game's main character you still have to pay attention to your surroundings. But that's the one saving grace here. The gameplay is nothing to write home about, but at least the story is better than in previous games, although the characters are so patently useless that you're inclined to want them to just get it over with and eat each other rather than fight the zombies.

    -

    dead island riptide lan crack 11


DOWNLOAD https://imgfil.com/2uxZIP



    -

In either case, the characters can be bought and sold in a way that feels standardized, with the only exception being character customization. The traditional upgrades for weapons and armor are there, but it's uncertain whether they have any positive impact on gameplay or even just your sanity. While this game does fix some of the issues of its predecessor, the fact remains that these elements aren't really enough to save the game. Factoring in a story that is essentially a step down from Dead Island, I found myself often just wanting to jump into the game for the ridiculously satisfying melee battles. Once I got past the initial stages of the opening hours, I found myself anticipating the next run-in against an onslaught of undead. Dead Island: Riptide certainly has the potential to be a gripping survival action game, but when the action gets dull, what's left is an entirely empty game.

    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Neat Image Pro 7.0 Full Crack !!EXCLUSIVE!!.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Neat Image Pro 7.0 Full Crack !!EXCLUSIVE!!.md
deleted file mode 100644
index f6742269f7671321d21870be063af0af206c0b28..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Neat Image Pro 7.0 Full Crack !!EXCLUSIVE!!.md
+++ /dev/null
@@ -1,8 +0,0 @@
    -

This is a program that utilizes the open standard format to batch-process images, and it is supported by many popular image editing applications, including Adobe Photoshop Elements. The program will open a folder full of images and process all the pictures with Neat Image's noise reduction technology. The program is mainly aimed at advanced photographers, with a focus on digital photography. However, it can be used for any type of image, including scans of negatives and slides. It can also be used as a filtering tool for other types of images, including those created with other image editing software packages.

    -

The program provides a lot of options, allowing you to control various aspects of the image processing. You can choose to use a preset noise reduction profile, or you can use one of the built-in profiles. You can adjust the amount of noise reduction applied, and you can choose to convert the image to black and white. You can also adjust the file type, which will determine the output format. You can save a copy of the processed image, or you can crop the image and save it. The program can be used to batch-process multiple images, with the main screen showing you the progress of the overall process.

    -

    download neat image pro 7.0 full crack


    Download ✦✦✦ https://imgfil.com/2uxZ72



    -

    if you are looking for a noise reduction tool that will make your images look better, then you have come to the right place. neat image pro is a program that processes your digital images by using noise reduction technology. it will reduce the noise and grain in your pictures, making them look sharper and cleaner.

    -

The program is the perfect all-in-one tool for your next photo shoot. It will automatically process a folder full of pictures, removing the noise and grain in them. Your pictures will be ready in seconds, and you can save a copy of the processed image, or you can crop it and save it to your computer. The program will also display the noise reduction progress, so you will always know how many more pictures are left to process.

    -
    -
\ No newline at end of file
diff --git a/spaces/1line/AutoGPT/autogpt/permanent_memory/__init__.py b/spaces/1line/AutoGPT/autogpt/permanent_memory/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/1phancelerku/anime-remove-background/Download Yesudas Kannada Ayyappa Songs Vol 6 MP3 for Free - New Devotional Songs 2023.md b/spaces/1phancelerku/anime-remove-background/Download Yesudas Kannada Ayyappa Songs Vol 6 MP3 for Free - New Devotional Songs 2023.md
deleted file mode 100644
index 6aede79e633c8142fe257af134710825b0a38bd7..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Yesudas Kannada Ayyappa Songs Vol 6 MP3 for Free - New Devotional Songs 2023.md
+++ /dev/null
@@ -1,288 +0,0 @@
    -

    Yesudas Kannada Ayyappa Songs Vol 6 MP3 Free Download: A Complete Guide

    -

    If you are a devotee of Lord Ayyappa, the Hindu god of righteousness and celibacy, you might have heard of yesudas kannada ayyappa songs vol 6, a collection of devotional songs sung by the legendary singer K.J. Yesudas. These songs are not only melodious and soothing, but also convey the deep faith and devotion of Lord Ayyappa's followers. In this article, we will tell you everything you need to know about yesudas kannada ayyappa songs vol 6, including their history, meaning, features, and most importantly, how to download them for free.

    -

    yesudas kannada ayyappa songs vol 6 mp3 free download


    DOWNLOAD >> https://jinyurl.com/2uNTwk



    -

    What are yesudas kannada ayyappa songs vol 6?

    -

    Yesudas kannada ayyappa songs vol 6 is an album of devotional songs dedicated to Lord Ayyappa, also known as Hariharasudhan, Manikandan, Shasta, or Dharma Shasta. The album consists of 10 songs, each with a different theme and mood, but all expressing the love and reverence for Lord Ayyappa. The album was released in 1986 by Tharangini Records, under the music direction of Gangai Amaran and the lyrics by Chovalloor Krishnankutty. The album was sung by K.J. Yesudas, one of the most acclaimed singers in India, who has won numerous awards and honors for his contribution to music.

    -

    Why are they popular among devotees of Lord Ayyappa?

    -

Yesudas kannada ayyappa songs vol 6 are popular among devotees of Lord Ayyappa for several reasons. First of all, they are sung by K.J. Yesudas, who is widely regarded as one of the best singers of devotional music in India. His voice has a unique charm and grace that captivates the listeners and transports them to a divine realm. Secondly, the songs are composed in a way that reflects the various aspects and attributes of Lord Ayyappa, such as his birth, his miracles, his temple in Sabarimala, his festivals, his blessings, and his teachings. The songs also incorporate elements from Hindu scriptures, such as Vedas, Puranas, Bhagavad Gita, etc., to enrich their meaning and significance. Thirdly, the songs are easy to sing along and remember, as they have catchy tunes and simple lyrics.

    They also suit the mood and spirit of the devotees who undertake the pilgrimage to Sabarimala, the abode of Lord Ayyappa, located in the Western Ghats of Kerala. The songs inspire and motivate the devotees to follow the strict rules and rituals of the pilgrimage, such as wearing black clothes, observing celibacy, fasting, abstaining from alcohol and tobacco, etc. The songs also create a sense of unity and brotherhood among the devotees, who call each other "Ayyappa Swamy" or "Ayyappa Bhakta".

    -

    How can you download them for free?

    -

    If you are interested in downloading yesudas kannada ayyappa songs vol 6 for free, you are in luck. There are many free music download sites and apps that support yesudas kannada ayyappa songs vol 6. However, not all of them are safe and legal. Some of them may contain viruses, malware, or spyware that can harm your device or compromise your privacy. Some of them may also violate the copyright laws and infringe the rights of the original creators and owners of the songs. Therefore, you need to be careful and cautious when choosing a free music download site or app.

    -

    A brief history and background of yesudas kannada ayyappa songs vol 6

    -

    Who is K.J. Yesudas and what is his contribution to devotional music?

    -

    K.J. Yesudas, also known as Kattassery Joseph Yesudas, is an Indian singer who was born on January 10, 1940, in Fort Kochi, Kerala. He is one of the most versatile and prolific singers in India, who has sung in more than 50 languages and genres, including classical, film, folk, pop, ghazal, bhajan, etc. He has recorded more than 80,000 songs in his career spanning over six decades. He has won eight National Film Awards for Best Male Playback Singer, the most by any singer in India. He has also received numerous other awards and honors, such as Padma Shri, Padma Bhushan, Padma Vibhushan, Sangeet Natak Akademi Award, etc. He has been conferred with honorary doctorates by several universities and institutions.

    -

    K.J. Yesudas is especially known for his devotional music, which he considers as his spiritual service to God and humanity. He has sung hundreds of devotional songs in various languages and religions, such as Hinduism, Christianity, Islam, Sikhism, Jainism, Buddhism, etc. He has a special affinity for Lord Ayyappa, whom he considers as his personal deity and guru. He has sung more than 300 songs dedicated to Lord Ayyappa in different languages, such as Malayalam, Tamil, Telugu, Kannada, Hindi, etc. He is also the official singer of Harivarasanam, the lullaby song that is played every night at Sabarimala temple before closing the sanctum sanctorum.


    What is the significance and meaning of Ayyappa songs and Harivarasanam?

    Ayyappa songs are devotional songs that praise and worship Lord Ayyappa: his attributes, his deeds, his miracles, his devotees, his temple, his festivals, his blessings, and his teachings. Devotees sing them to express their love, gratitude, devotion, and surrender to Lord Ayyappa, and to seek his protection, guidance, and grace in their lives. Ayyappa songs are usually sung during the pilgrimage season to Sabarimala, which lasts from November to January every year, and on other occasions such as Ayyappa Jayanti, Makara Sankranti, and Guruvayur Ekadasi.

    Harivarasanam is a special Ayyappa song, considered the most sacred and important of them all. It is a Sanskrit hymn composed by Kumbakudi Kulathur Iyer in the 1940s: a lullaby that praises Lord Ayyappa as Hariharaputra (the son of Vishnu and Shiva) and requests him to rest after a long day of protecting his devotees. A rendition by K.J. Yesudas is played every night at the Sabarimala temple as the doors of the sanctum sanctorum are closed. It is believed that Lord Ayyappa listens to Harivarasanam and goes to sleep peacefully, and devotees who hear it feel his presence and blessings in their hearts.

    When and how was yesudas kannada ayyappa songs vol 6 released and composed?

    Yesudas kannada ayyappa songs vol 6 was released in 1986 by Tharangini Records, a music company founded by K.J. Yesudas in 1980. Tharangini Records was the first music company in Kerala to produce and distribute devotional music albums; it has since produced more than 1,000 albums across languages and genres, including classical, film, folk, pop, ghazal, and bhajan, and has been instrumental in promoting new talent and preserving traditional music forms.

    The album was composed by Gangai Amaran, a Tamil music director, singer, lyricist, writer, and actor, and the younger brother of the famous music director Ilayaraja. He has composed music for more than 100 films in Tamil, Telugu, Kannada, Malayalam, and Hindi, and has written lyrics for more than 200 songs in various languages. He is known for simple, catchy tunes that appeal to the masses.

    The lyrics of yesudas kannada ayyappa songs vol 6 were written by Chovalloor Krishnankutty, a Malayalam poet and lyricist who has written lyrics for more than 500 songs in Malayalam, Tamil, Kannada, Telugu, and Hindi, as well as poems, novels, short stories, and essays. He is known for a poetic and philosophical style that reflects his deep knowledge and understanding of Hindu scriptures and culture.


    A review and analysis of yesudas kannada ayyappa songs vol 6


    What are the main features and highlights of the album?

    Yesudas kannada ayyappa songs vol 6 showcases the versatility and excellence of K.J. Yesudas as a singer of devotional music. The album's 10 songs cover different themes and aspects of Lord Ayyappa's life and worship. They are sung in Kannada, with a sprinkling of Sanskrit words and phrases, in a blend of classical and folk styles that suits the mood and tone of the lyrics, and their soothing, melodious quality creates a spiritual atmosphere for the listener.


    The album also features the voice of Gangai Amaran as a co-singer in some of the songs. He adds a different flavor and dimension to the songs with his distinctive voice and style. He also provides some narration and commentary in between the songs to explain their meaning and context.

    The album also comes with a track listing that gives each song's name, duration, and track number, along with information about the singers, music director, lyricist, and producer. It is helpful for listeners who want to know more about the album and its creators.


    Here is the track listing of yesudas kannada ayyappa songs vol 6:

    All tracks were composed by Gangai Amaran and produced by K.J. Yesudas. The lyrics are by Chovalloor Krishnankutty, except for Harivarasanam, whose Sanskrit text is credited to Kumbakudi Kulathur Iyer.

    Track  Song Name                                  Duration  Singers
    1      Ayyappa Sharanam                           5:12      K.J. Yesudas, Gangai Amaran
    2      Hariharasudha Sharanam                     4:55      K.J. Yesudas, Gangai Amaran
    3      Ayyappa Swamyge Namaha                     4:48      K.J. Yesudas, Gangai Amaran
    4      Ayyappa Neeve                              5:02      K.J. Yesudas, Gangai Amaran
    5      Ayyappa Ninna Namave Chanda                4:58      K.J. Yesudas, Gangai Amaran
    6      Ayyappa Ninna Padave Chanda                4:54      K.J. Yesudas, Gangai Amaran
    7      Ayyappa Ninna Poojege Bande Mahadeshwara   5:06      K.J. Yesudas, Gangai Amaran
    8      Ayyappa Swamyge Jaya Jaya                  5:10      K.J. Yesudas, Gangai Amaran
    9      Ayyappa Swamyge Mangalam                   5:14      K.J. Yesudas, Gangai Amaran
    10     Harivarasanam                              2:42      K.J. Yesudas

    How do the songs reflect the devotion and faith of Lord Ayyappa's followers?

    The songs of yesudas kannada ayyappa songs vol 6 reflect the devotion and faith of Lord Ayyappa's followers in several ways. They praise Lord Ayyappa as the supreme lord, the protector, the savior, the friend, the teacher, and the father of his devotees; they describe the glory and beauty of Lord Ayyappa, his temple, his mount, his weapons, his symbols, his ornaments, and his attributes; they narrate the stories and legends of his birth, childhood, miracles, battles, renunciation, and enlightenment; and they give voice to the devotees' own joy, sorrow, gratitude, surrender, longing, hope, and love for Lord Ayyappa.


    What are some of the best songs and lyrics from the album?

    The album has many beautiful and meaningful songs and lyrics that can touch the hearts of listeners. Some of the best songs and lyrics from the album are:

    - -

    A guide on how to download yesudas kannada ayyappa songs vol 6 for free


    What are some of the best free music download sites and apps that support yesudas kannada ayyappa songs vol 6?


    There are many free music download sites and apps that support yesudas kannada ayyappa songs vol 6, but some of the best ones are:

    - -

    What are the steps and tips to download the songs safely and legally?

    To download the songs of yesudas kannada ayyappa songs vol 6 safely and legally, follow these steps (a small scripted alternative is sketched after the list):

    1. Choose a reliable and reputable free music download site or app that supports yesudas kannada ayyappa songs vol 6, such as YouTube, Gaana, or Wynk Music.
    2. Make sure that you have a stable internet connection and enough storage space on your device.
    3. Search for the songs or the album that you want to download by typing their names or keywords in the search bar.
    4. Select the song or the album that you want to download from the search results or from the album page.
    5. Click on the download button or icon that appears next to the song or on the album page.
    6. Choose the quality and format that you want to download, such as MP3 or MP4.
    7. Wait for the download to complete and check that it was successful.
    8. Enjoy and share your downloaded music with your friends and family.
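
    For readers who prefer to script this flow, here is a minimal Python sketch using the open-source yt-dlp library together with ffmpeg. This is purely illustrative: the article itself names no tool or API, both programs are assumed to be installed, the search query is a hypothetical example, and you should only download recordings you are legally permitted to keep.

    # Minimal, illustrative sketch: search for a track and save it as MP3.
    # Assumes: pip install yt-dlp, and ffmpeg available on PATH.
    from yt_dlp import YoutubeDL

    options = {
        "format": "bestaudio/best",       # pick the best available audio stream
        "outtmpl": "%(title)s.%(ext)s",   # name the file after the video title
        "postprocessors": [{              # convert the download to MP3 via ffmpeg
            "key": "FFmpegExtractAudio",
            "preferredcodec": "mp3",
            "preferredquality": "192",
        }],
    }

    with YoutubeDL(options) as ydl:
        # "ytsearch1:" downloads the top search result for the query (hypothetical query).
        ydl.download(["ytsearch1:Ayyappa Sharanam KJ Yesudas Kannada"])

    The same call accepts a direct album or video URL in place of the search query.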

    Some tips to download the songs safely and legally are:

    - -

    How can you enjoy and share the songs offline and online?


    Once you have downloaded the songs of yesudas kannada ayyappa songs vol 6, you can enjoy and share them offline and online in various ways. Some of them are:

    - -

    Conclusion


    In conclusion, yesudas kannada ayyappa songs vol 6 is an amazing album of devotional songs dedicated to Lord Ayyappa, sung by K.J. Yesudas, one of the best singers of devotional music in India. Its 10 songs cover different themes and aspects of Lord Ayyappa's life and worship, and their soothing, melodious quality creates a spiritual atmosphere for the listener. The album is also easy to download for free from the sites and apps discussed above, and easy to enjoy and share offline and online with your friends and family.


    If you are a devotee of Lord Ayyappa or a fan of K.J. Yesudas, you should definitely download and listen to yesudas kannada ayyappa songs vol 6. You will not regret it: you will feel the presence and blessings of Lord Ayyappa in your heart, come to appreciate the talent and dedication of K.J. Yesudas as a singer of devotional music, and learn more about Lord Ayyappa's history, culture, and teachings.


    So what are you waiting for? Download yesudas kannada ayyappa songs vol 6 for free today and enjoy the divine music of Lord Ayyappa. You will be glad you did.


    Thank you for reading this article. We hope you found it useful and informative. If you have any questions or feedback, please feel free to contact us. We would love to hear from you.


    FAQs


    Here are some frequently asked questions about yesudas kannada ayyappa songs vol 6:

    1. Where can I find more information about Lord Ayyappa and his temple in Sabarimala?

       You can find more information about Lord Ayyappa and his temple in Sabarimala from various sources, such as books, magazines, websites, blogs, podcasts, and videos. Some of the best sources are:

       - -

    2. How can I support K.J. Yesudas and his music career?

       You can support K.J. Yesudas and his music career in various ways, such as:

       - -

    3. What are some other devotional albums by K.J. Yesudas that I can download for free?

       Some other devotional albums by K.J. Yesudas that you can download for free are:

       - -

    4. How can I convert the songs to different formats or edit them according to my preferences?

       You can convert the songs to different formats or edit them according to your preferences using various tools and methods (a small scripted example follows this list), such as:

       - -

    5. How can I donate or contribute to the welfare of Lord Ayyappa's devotees?

       You can donate or contribute to the welfare of Lord Ayyappa's devotees in various ways, such as:

      -
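
    As a concrete illustration for the format-conversion question above, here is a minimal Python sketch that shells out to the widely used ffmpeg tool. The article names no specific converter, so the tool choice is an assumption, ffmpeg is assumed to be installed, and the file name is a hypothetical example.

    # Minimal, illustrative sketch: convert a downloaded MP3 to WAV with ffmpeg.
    # Assumes ffmpeg is installed and on PATH; the input file name is hypothetical.
    import subprocess
    from pathlib import Path

    def convert_to_wav(src: str) -> Path:
        dst = Path(src).with_suffix(".wav")
        # -y overwrites any existing output file; -i names the input file.
        subprocess.run(["ffmpeg", "-y", "-i", src, str(dst)], check=True)
        return dst

    if __name__ == "__main__":
        convert_to_wav("Ayyappa Sharanam.mp3")  # hypothetical downloaded file

    Swapping ".wav" for ".ogg" or ".flac" in the same call converts to those formats instead, since ffmpeg infers the codec from the output extension.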

      \ No newline at end of file diff --git a/spaces/AHzizi/WaifuVoiceGen/mel_processing.py b/spaces/AHzizi/WaifuVoiceGen/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/AHzizi/WaifuVoiceGen/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', 
normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/AIFILMS/StyleGANEX/scripts/generate_sketch_data.py b/spaces/AIFILMS/StyleGANEX/scripts/generate_sketch_data.py deleted file mode 100644 index a13acf949bf2efb3449f13922b7489e5c06880a3..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/StyleGANEX/scripts/generate_sketch_data.py +++ /dev/null @@ -1,62 +0,0 @@ -from torchvision import transforms -from torchvision.utils import save_image -from torch.utils.serialization import load_lua -import os -import cv2 -import numpy as np - -""" -NOTE!: Must have torch==0.4.1 and torchvision==0.2.1 -The sketch simplification model (sketch_gan.t7) from Simo Serra et al. can be downloaded from their official implementation: - https://github.com/bobbens/sketch_simplification -""" - - -def sobel(img): - opImgx = cv2.Sobel(img, cv2.CV_8U, 0, 1, ksize=3) - opImgy = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3) - return cv2.bitwise_or(opImgx, opImgy) - - -def sketch(frame): - frame = cv2.GaussianBlur(frame, (3, 3), 0) - invImg = 255 - frame - edgImg0 = sobel(frame) - edgImg1 = sobel(invImg) - edgImg = cv2.addWeighted(edgImg0, 0.75, edgImg1, 0.75, 0) - opImg = 255 - edgImg - return opImg - - -def get_sketch_image(image_path): - original = cv2.imread(image_path) - original = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY) - sketch_image = sketch(original) - return sketch_image[:, :, np.newaxis] - - -use_cuda = True - -cache = load_lua("/path/to/sketch_gan.t7") -model = cache.model -immean = cache.mean -imstd = cache.std -model.evaluate() - -data_path = "/path/to/data/imgs" -images = [os.path.join(data_path, f) for f in os.listdir(data_path)] - -output_dir = "/path/to/data/edges" -if not os.path.exists(output_dir): - os.makedirs(output_dir) - -for idx, image_path in enumerate(images): - if idx % 50 == 0: - print("{} out of {}".format(idx, len(images))) - data = get_sketch_image(image_path) - data = ((transforms.ToTensor()(data) - immean) / imstd).unsqueeze(0) - if use_cuda: - pred = model.cuda().forward(data.cuda()).float() - else: - pred = model.forward(data) - save_image(pred[0], os.path.join(output_dir, "{}_edges.jpg".format(image_path.split("/")[-1].split('.')[0]))) diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/pann_model.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/pann_model.py deleted file mode 100644 index 109db5f418a0bad32cae2452742589ff52a19b85..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/pann_model.py +++ /dev/null @@ -1,543 +0,0 @@ -# PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition -# Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn -# Some layers are re-designed for CLAP -import os -os.environ['NUMBA_CACHE_DIR'] = '/tmp/' - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchlibrosa.stft import Spectrogram, LogmelFilterBank -from torchlibrosa.augmentation import SpecAugmentation - -from .utils import do_mixup, interpolate, pad_framewise_output -from .feature_fusion import iAFF, AFF, DAF - - -def init_layer(layer): - """Initialize a Linear or Convolutional layer. 
""" - nn.init.xavier_uniform_(layer.weight) - - if hasattr(layer, 'bias'): - if layer.bias is not None: - layer.bias.data.fill_(0.) - - -def init_bn(bn): - """Initialize a Batchnorm layer. """ - bn.bias.data.fill_(0.) - bn.weight.data.fill_(1.) - - -class ConvBlock(nn.Module): - def __init__(self, in_channels, out_channels): - - super(ConvBlock, self).__init__() - - self.conv1 = nn.Conv2d(in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), stride=(1, 1), - padding=(1, 1), bias=False) - - self.conv2 = nn.Conv2d(in_channels=out_channels, - out_channels=out_channels, - kernel_size=(3, 3), stride=(1, 1), - padding=(1, 1), bias=False) - - self.bn1 = nn.BatchNorm2d(out_channels) - self.bn2 = nn.BatchNorm2d(out_channels) - - self.init_weight() - - def init_weight(self): - init_layer(self.conv1) - init_layer(self.conv2) - init_bn(self.bn1) - init_bn(self.bn2) - - - def forward(self, input, pool_size=(2, 2), pool_type='avg'): - - x = input - x = F.relu_(self.bn1(self.conv1(x))) - x = F.relu_(self.bn2(self.conv2(x))) - if pool_type == 'max': - x = F.max_pool2d(x, kernel_size=pool_size) - elif pool_type == 'avg': - x = F.avg_pool2d(x, kernel_size=pool_size) - elif pool_type == 'avg+max': - x1 = F.avg_pool2d(x, kernel_size=pool_size) - x2 = F.max_pool2d(x, kernel_size=pool_size) - x = x1 + x2 - else: - raise Exception('Incorrect argument!') - - return x - - -class ConvBlock5x5(nn.Module): - def __init__(self, in_channels, out_channels): - - super(ConvBlock5x5, self).__init__() - - self.conv1 = nn.Conv2d(in_channels=in_channels, - out_channels=out_channels, - kernel_size=(5, 5), stride=(1, 1), - padding=(2, 2), bias=False) - - self.bn1 = nn.BatchNorm2d(out_channels) - - self.init_weight() - - def init_weight(self): - init_layer(self.conv1) - init_bn(self.bn1) - - - def forward(self, input, pool_size=(2, 2), pool_type='avg'): - - x = input - x = F.relu_(self.bn1(self.conv1(x))) - if pool_type == 'max': - x = F.max_pool2d(x, kernel_size=pool_size) - elif pool_type == 'avg': - x = F.avg_pool2d(x, kernel_size=pool_size) - elif pool_type == 'avg+max': - x1 = F.avg_pool2d(x, kernel_size=pool_size) - x2 = F.max_pool2d(x, kernel_size=pool_size) - x = x1 + x2 - else: - raise Exception('Incorrect argument!') - - return x - - -class AttBlock(nn.Module): - def __init__(self, n_in, n_out, activation='linear', temperature=1.): - super(AttBlock, self).__init__() - - self.activation = activation - self.temperature = temperature - self.att = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True) - self.cla = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True) - - self.bn_att = nn.BatchNorm1d(n_out) - self.init_weights() - - def init_weights(self): - init_layer(self.att) - init_layer(self.cla) - init_bn(self.bn_att) - - def forward(self, x): - # x: (n_samples, n_in, n_time) - norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1) - cla = self.nonlinear_transform(self.cla(x)) - x = torch.sum(norm_att * cla, dim=2) - return x, norm_att, cla - - def nonlinear_transform(self, x): - if self.activation == 'linear': - return x - elif self.activation == 'sigmoid': - return torch.sigmoid(x) - - -class Cnn14(nn.Module): - def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, - fmax, classes_num, enable_fusion=False, fusion_type='None'): - - super(Cnn14, self).__init__() - - window = 'hann' - center = True - pad_mode = 'reflect' - ref = 1.0 - amin = 1e-10 - top_db = None - - self.enable_fusion = 
enable_fusion - self.fusion_type = fusion_type - - # Spectrogram extractor - self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size, - win_length=window_size, window=window, center=center, pad_mode=pad_mode, - freeze_parameters=True) - - # Logmel feature extractor - self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size, - n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db, - freeze_parameters=True) - - # Spec augmenter - self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2, - freq_drop_width=8, freq_stripes_num=2) - - self.bn0 = nn.BatchNorm2d(64) - - if (self.enable_fusion) and (self.fusion_type == 'channel_map'): - self.conv_block1 = ConvBlock(in_channels=4, out_channels=64) - else: - self.conv_block1 = ConvBlock(in_channels=1, out_channels=64) - self.conv_block2 = ConvBlock(in_channels=64, out_channels=128) - self.conv_block3 = ConvBlock(in_channels=128, out_channels=256) - self.conv_block4 = ConvBlock(in_channels=256, out_channels=512) - self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024) - self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048) - - self.fc1 = nn.Linear(2048, 2048, bias=True) - self.fc_audioset = nn.Linear(2048, classes_num, bias=True) - - if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']): - self.mel_conv1d = nn.Sequential( - nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2), - nn.BatchNorm1d(64) # No Relu - ) - if self.fusion_type == 'daf_1d': - self.fusion_model = DAF() - elif self.fusion_type == 'aff_1d': - self.fusion_model = AFF(channels=64, type='1D') - elif self.fusion_type == 'iaff_1d': - self.fusion_model = iAFF(channels=64, type='1D') - - if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']): - self.mel_conv2d = nn.Sequential( - nn.Conv2d(1, 64, kernel_size=(5,5), stride=(6, 2), padding=(2,2)), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True) - ) - - if self.fusion_type == 'daf_2d': - self.fusion_model = DAF() - elif self.fusion_type == 'aff_2d': - self.fusion_model = AFF(channels=64, type='2D') - elif self.fusion_type == 'iaff_2d': - self.fusion_model = iAFF(channels=64, type='2D') - self.init_weight() - - def init_weight(self): - init_bn(self.bn0) - init_layer(self.fc1) - init_layer(self.fc_audioset) - - def forward(self, input, mixup_lambda=None, device=None): - """ - Input: (batch_size, data_length)""" - - if self.enable_fusion and input["longer"].sum() == 0: - # if no audio is longer than 10s, then randomly select one audio to be longer - input["longer"][torch.randint(0, input["longer"].shape[0], (1,))] = True - - if not self.enable_fusion: - x = self.spectrogram_extractor(input['waveform'].to(device=device, non_blocking=True)) # (batch_size, 1, time_steps, freq_bins) - x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) - - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - else: - longer_list = input["longer"].to(device=device, non_blocking=True) - x = input["mel_fusion"].to(device=device, non_blocking=True) - longer_list_idx = torch.where(longer_list)[0] - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - if self.fusion_type in ['daf_1d','aff_1d','iaff_1d']: - new_x = x[:,0:1,:,:].clone().contiguous() - # local processing - if len(longer_list_idx) > 0: - fusion_x_local = x[longer_list_idx,1:,:,:].clone().contiguous() - FB,FC,FT,FF = fusion_x_local.size() - fusion_x_local = fusion_x_local.view(FB * FC, FT, FF) - fusion_x_local = 
torch.permute(fusion_x_local, (0,2,1)).contiguous() - fusion_x_local = self.mel_conv1d(fusion_x_local) - fusion_x_local = fusion_x_local.view(FB,FC,FF,fusion_x_local.size(-1)) - fusion_x_local = torch.permute(fusion_x_local, (0,2,1,3)).contiguous().flatten(2) - if fusion_x_local.size(-1) < FT: - fusion_x_local = torch.cat([fusion_x_local, torch.zeros((FB,FF,FT- fusion_x_local.size(-1)), device=device)], dim=-1) - else: - fusion_x_local = fusion_x_local[:,:,:FT] - # 1D fusion - new_x = new_x.squeeze(1).permute((0,2,1)).contiguous() - new_x[longer_list_idx] = self.fusion_model(new_x[longer_list_idx], fusion_x_local) - x = new_x.permute((0,2,1)).contiguous()[:,None,:,:] - else: - x = new_x - elif self.fusion_type in ['daf_2d','aff_2d','iaff_2d','channel_map']: - x = x # no change - - if self.training: - x = self.spec_augmenter(x) - # Mixup on spectrogram - if self.training and mixup_lambda is not None: - x = do_mixup(x, mixup_lambda) - if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']): - global_x = x[:,0:1,:,:] - - # global processing - B, C, H, W = global_x.shape - global_x = self.conv_block1(global_x, pool_size=(2, 2), pool_type='avg') - if len(longer_list_idx) > 0: - local_x = x[longer_list_idx,1:,:,:].contiguous() - TH = global_x.size(-2) - # local processing - B, C, H, W = local_x.shape - local_x = local_x.view(B*C,1,H,W) - local_x = self.mel_conv2d(local_x) - local_x = local_x.view(B,C,local_x.size(1),local_x.size(2),local_x.size(3)) - local_x = local_x.permute((0,2,1,3,4)).contiguous().flatten(2,3) - TB,TC,_,TW = local_x.size() - if local_x.size(-2) < TH: - local_x = torch.cat([local_x, torch.zeros((TB,TC,TH-local_x.size(-2),TW), device=global_x.device)], dim=-2) - else: - local_x = local_x[:,:,:TH,:] - - global_x[longer_list_idx] = self.fusion_model(global_x[longer_list_idx],local_x) - x = global_x - else: - x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg') - - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = torch.mean(x, dim=3) - - latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x = latent_x1 + latent_x2 - latent_x = latent_x.transpose(1, 2) - latent_x = F.relu_(self.fc1(latent_x)) - latent_output = interpolate(latent_x, 32) - - - (x1, _) = torch.max(x, dim=2) - x2 = torch.mean(x, dim=2) - x = x1 + x2 - x = F.dropout(x, p=0.5, training=self.training) - x = F.relu_(self.fc1(x)) - embedding = F.dropout(x, p=0.5, training=self.training) - clipwise_output = torch.sigmoid(self.fc_audioset(x)) - - output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output} - return output_dict - - -class Cnn6(nn.Module): - def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, - fmax, classes_num, enable_fusion=False, fusion_type='None'): - - super(Cnn6, self).__init__() - - window = 'hann' - center = True - pad_mode = 'reflect' - ref = 1.0 - amin = 1e-10 - 
top_db = None - - self.enable_fusion = enable_fusion - self.fusion_type = fusion_type - - # Spectrogram extractor - self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size, - win_length=window_size, window=window, center=center, pad_mode=pad_mode, - freeze_parameters=True) - - # Logmel feature extractor - self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size, - n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db, - freeze_parameters=True) - - # Spec augmenter - self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2, - freq_drop_width=8, freq_stripes_num=2) - - self.bn0 = nn.BatchNorm2d(64) - - self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64) - self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128) - self.conv_block3 = ConvBlock5x5(in_channels=128, out_channels=256) - self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512) - - self.fc1 = nn.Linear(512, 512, bias=True) - self.fc_audioset = nn.Linear(512, classes_num, bias=True) - - self.init_weight() - - def init_weight(self): - init_bn(self.bn0) - init_layer(self.fc1) - init_layer(self.fc_audioset) - - def forward(self, input, mixup_lambda=None, device=None): - """ - Input: (batch_size, data_length)""" - - x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins) - x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) - - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - - if self.training: - x = self.spec_augmenter(x) - - # Mixup on spectrogram - if self.training and mixup_lambda is not None: - x = do_mixup(x, mixup_lambda) - - x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = torch.mean(x, dim=3) - - latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x = latent_x1 + latent_x2 - latent_x = latent_x.transpose(1, 2) - latent_x = F.relu_(self.fc1(latent_x)) - latent_output = interpolate(latent_x, 16) - - (x1, _) = torch.max(x, dim=2) - x2 = torch.mean(x, dim=2) - x = x1 + x2 - x = F.dropout(x, p=0.5, training=self.training) - x = F.relu_(self.fc1(x)) - embedding = F.dropout(x, p=0.5, training=self.training) - clipwise_output = torch.sigmoid(self.fc_audioset(x)) - - output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output} - - return output_dict - - -class Cnn10(nn.Module): - def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, - fmax, classes_num, enable_fusion=False, fusion_type='None'): - - super(Cnn10, self).__init__() - - window = 'hann' - center = True - pad_mode = 'reflect' - ref = 1.0 - amin = 1e-10 - top_db = None - - self.enable_fusion = enable_fusion - self.fusion_type = fusion_type - - # Spectrogram extractor - self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size, - win_length=window_size, window=window, center=center, pad_mode=pad_mode, - freeze_parameters=True) - - # Logmel feature extractor - self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size, - 
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db, - freeze_parameters=True) - - # Spec augmenter - self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2, - freq_drop_width=8, freq_stripes_num=2) - - self.bn0 = nn.BatchNorm2d(64) - - self.conv_block1 = ConvBlock(in_channels=1, out_channels=64) - self.conv_block2 = ConvBlock(in_channels=64, out_channels=128) - self.conv_block3 = ConvBlock(in_channels=128, out_channels=256) - self.conv_block4 = ConvBlock(in_channels=256, out_channels=512) - self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024) - - self.fc1 = nn.Linear(1024, 1024, bias=True) - self.fc_audioset = nn.Linear(1024, classes_num, bias=True) - - self.init_weight() - - def init_weight(self): - init_bn(self.bn0) - init_layer(self.fc1) - init_layer(self.fc_audioset) - - def forward(self, input, mixup_lambda=None, device=None): - """ - Input: (batch_size, data_length)""" - - x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins) - x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) - - x = x.transpose(1, 3) - x = self.bn0(x) - x = x.transpose(1, 3) - - if self.training: - x = self.spec_augmenter(x) - - # Mixup on spectrogram - if self.training and mixup_lambda is not None: - x = do_mixup(x, mixup_lambda) - - x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg') - x = F.dropout(x, p=0.2, training=self.training) - x = torch.mean(x, dim=3) - - latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1) - latent_x = latent_x1 + latent_x2 - latent_x = latent_x.transpose(1, 2) - latent_x = F.relu_(self.fc1(latent_x)) - latent_output = interpolate(latent_x, 32) - - (x1, _) = torch.max(x, dim=2) - x2 = torch.mean(x, dim=2) - x = x1 + x2 - x = F.dropout(x, p=0.5, training=self.training) - x = F.relu_(self.fc1(x)) - embedding = F.dropout(x, p=0.5, training=self.training) - clipwise_output = torch.sigmoid(self.fc_audioset(x)) - - output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output} - - return output_dict - - -def create_pann_model(audio_cfg, enable_fusion=False, fusion_type='None'): - try: - ModelProto = eval(audio_cfg.model_name) - model = ModelProto( - sample_rate = audio_cfg.sample_rate, - window_size = audio_cfg.window_size, - hop_size =audio_cfg.hop_size, - mel_bins = audio_cfg.mel_bins, - fmin = audio_cfg.fmin, - fmax = audio_cfg.fmax, - classes_num = audio_cfg.class_num, - enable_fusion = enable_fusion, - fusion_type = fusion_type - ) - return model - except: - raise RuntimeError(f'Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough.') - diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/ddpm_audio_inpaint.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/ddpm_audio_inpaint.py deleted file mode 100644 index 1541a74cd3082d8b44ba7a7988aeb65c2dd84a24..0000000000000000000000000000000000000000 --- 
a/spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/ddpm_audio_inpaint.py +++ /dev/null @@ -1,1081 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" -import os -import torch -import torch.nn as nn -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager -from functools import partial -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only - -from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL -from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.models.diffusion.ddpm import DDPM, disabled_train - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - -# add mel_dim and mel_length params to ensure correct shape -class LatentDiffusion_audioinpaint(DDPM): - """main class""" - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - mel_dim=80, - mel_length=848, - cond_stage_key="image", - cond_stage_trainable=False, - concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - test_repeat=1, - test_numsteps = None, - *args, **kwargs): - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': - conditioning_key = None - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - self.test_repeat = test_repeat - if test_numsteps == None: - self.test_numsteps = self.num_timesteps - self.concat_mode = concat_mode - self.mel_dim = mel_dim - self.mel_length = mel_length - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.cond_stage_forward = cond_stage_forward - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 
1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - @rank_zero_only - @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx): - # only for very first batch - if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: - assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print("### USING STD-RESCALING ###") - x = super().get_input(batch, self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1. / z.flatten().std()) - print(f"setting self.scale_factor to {self.scale_factor}") - print("### USING STD-RESCALING ###") - - def register_schedule(self, - given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__":# for no_text inpainting task - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__":# for unconditional image generation such as human face、ImageNet - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - model = instantiate_from_config(config) - self.cond_stage_model = model - - def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append(self.decode_first_stage(zd.to(self.device), - force_not_quantize=force_no_decoder_quantization)) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior):# encode_emb from autoencoder - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if 
self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], - self.split_input_params["clip_max_weight"], ) - weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - - if self.split_input_params["tie_braker"]: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip(L_weighting, - self.split_input_params["clip_min_tie_weight"], - self.split_input_params["clip_max_tie_weight"]) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) - - elif uf > 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, padding=0, - stride=(stride[0] * uf, stride[1] * uf)) - fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) - - elif df > 1 and uf == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - 
dilation=1, padding=0, - stride=(stride[0] // df, stride[1] // df)) - fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - @torch.no_grad() - def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, - cond_key=None, return_original_cond=False, bs=None): - x = super().get_input(batch, k) - if bs is not None: - x = x[:bs] - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - - if self.model.conditioning_key is not None:# 'crossattn' for txt2image, 'hybird' for txt_inpaint - if cond_key is None: - cond_key = self.cond_stage_key # 'caption' for txt_inpaint - if self.model.conditioning_key == 'hybrid': - xc = {} - assert cond_key == 'caption' # only txt_inpaint is implemented now - assert 'masked_image' in batch.keys() - assert 'mask' in batch.keys() - masked_image = super().get_input(batch,'masked_image') - mask = super().get_input(batch,'mask') - if bs is not None: - masked_image,mask = masked_image[:bs],mask[:bs] - masked_image,mask = masked_image.to(self.device),mask.to(self.device) - masked_image = self.get_first_stage_encoding(self.encode_first_stage(masked_image)).detach() - resized_mask = torch.nn.functional.interpolate(mask,size=masked_image.shape[-2:]) - xc['c_concat'] = torch.cat((masked_image,resized_mask),dim = 1) - xc[cond_key] = batch[cond_key] - else: - if cond_key != self.first_stage_key: - if cond_key in ['caption', 'coordinates_bbox']: - xc = batch[cond_key] - elif cond_key == 'class_label': - xc = batch - else: - xc = super().get_input(batch, cond_key).to(self.device) - else:# cond_key == 'image' - xc = x - if not self.cond_stage_trainable or force_c_encode:# cond_stage_trainable is true for txt2img,force_c_encoder = True,when called in log_images - if isinstance(xc, list): - # import pudb; pudb.set_trace() - c = self.get_learned_conditioning(xc)# 因为log_images内接下来要调用sample_log,所以需要预先得到处理好的c - if isinstance(xc, dict): - c = {} - c['c_concat'] = xc['c_concat'] - c['c_crossattn'] = self.get_learned_conditioning(xc[cond_key]) - else: - c = self.get_learned_conditioning(xc.to(self.device)) - else: - c = xc - if bs is not None: - if isinstance(c,dict): - for k in c.keys(): - c[k] = c[k][:bs] - else: - c = c[:bs] - - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - ckey = __conditioning_keys__[self.model.conditioning_key] - c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} - - else: - c = None - xc = None - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - c = {'pos_x': pos_x, 'pos_y': pos_y} - out = [z, c] - if return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_original_cond: - out.append(xc) - return out - - @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. 
/ self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - # same as above but without decorator - def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. 
reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - @torch.no_grad() - def encode_first_stage(self, x): - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - df = self.split_input_params["vqf"] - self.split_input_params['original_image_size'] = x.shape[-2:] - bs, nc, h, w = x.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) - z = unfold(x) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) - o = o * weighting - - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization - return decoded - - else: - return self.first_stage_model.encode(x) - else: - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key)# get latent and condition - loss = self(x, c) - return loss - - def test_step(self,batch,batch_idx): - # TODO make self.test_repeat work - cond = {} - cond[self.cond_stage_key] = batch[self.cond_stage_key] - cond[self.cond_stage_key] = self.get_learned_conditioning(cond[self.cond_stage_key]) # c: string -> [B, T, Context_dim] - cond['c_crossattn'] = cond.pop(self.cond_stage_key) - masked_image = super().get_input(batch,'masked_image') - mask = super().get_input(batch,'mask') - masked_image,mask = masked_image.to(self.device),mask.to(self.device) - masked_image = self.get_first_stage_encoding(self.encode_first_stage(masked_image)).detach() - resized_mask = torch.nn.functional.interpolate(mask,size=masked_image.shape[-2:]) - cond['c_concat'] = torch.cat((masked_image,resized_mask),dim = 1) - batch_size = len(batch[self.cond_stage_key]) - # shape = [batch_size,self.channels,self.mel_dim,self.mel_length] - enc_emb = self.sample(cond,batch_size,timesteps=self.test_numsteps) - xrec = self.decode_first_stage(enc_emb) - reconstructions = (xrec + 1)/2 # to mel scale - test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path) - savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class') - if not os.path.exists(savedir): - os.makedirs(savedir) - - file_names = batch['f_name'] - nfiles = len(file_names) - reconstructions = reconstructions.cpu().numpy().squeeze(1) # squuze channel dim - for k in range(reconstructions.shape[0]): - b,repeat = 
k % nfiles, k // nfiles - vname_num_split_index = file_names[b].rfind('_') # file_names[b]: video_name + '_' + num - v_n, num = file_names[b][:vname_num_split_index], file_names[b][vname_num_split_index+1:] - save_img_path = os.path.join(savedir, f'{v_n}_sample_{num}_{repeat}.npy') # the num-th caption, the repeat-th repetition - np.save(save_img_path, reconstructions[b]) - - return None - - def forward(self, x, c, *args, **kwargs): - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - if self.model.conditioning_key is not None: - assert c is not None - if self.cond_stage_trainable: - if isinstance(c, dict): - c[self.cond_stage_key] = self.get_learned_conditioning(c[self.cond_stage_key]) - c['c_crossattn'] = c.pop(self.cond_stage_key) - else: - c = self.get_learned_conditioning(c) # c: string -> [B, T, Context_dim] - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) - return self.p_losses(x, c, t, *args, **kwargs) - - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = torch.clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2], min=0., max=1.) # torch.clamp needs explicit bounds; clamp into [0, 1] - y0 = torch.clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3], min=0., max=1.) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - - def apply_model(self, x_noisy, t, cond, return_ids=False): - # wrap values in lists to enable the concat operation below - if isinstance(cond, dict): - # hybrid case, cond is expected to be a dict. (txt2inpaint) - cond_tmp = {} # use cond_tmp to avoid in-place edits - for k, v in cond.items(): - if not isinstance(v, list): - cond_tmp[k] = [cond[k]] - else: - cond_tmp[k] = cond[k] - cond = cond_tmp - else: - if not isinstance(cond, list): - cond = [cond] - key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' - cond = {key: cond} - - if hasattr(self, "split_input_params"): - assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids - ks = self.split_input_params["ks"] # e.g. (128, 128) - stride = self.split_input_params["stride"] # e.g. 
(64, 64) - - h, w = x_noisy.shape[-2:] - - fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) - - z = unfold(x_noisy) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] - - if self.cond_stage_key in ["image", "LR_image", "segmentation", - 'bbox_img'] and self.model.conditioning_key: # todo check for completeness - c_key = next(iter(cond.keys())) # get key - c = next(iter(cond.values())) # get value - assert (len(c) == 1) # todo extend to list with more than one elem - c = c[0] # get element - - c = unfold(c) - c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] - - elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size' - - # assuming padding of unfold is always 0 and its dilation is always 1 - n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params['original_image_size'] - # as we are operating on latents, we need the factor from the original image size to the - # spatial latent size to properly rescale the crops for regenerating the bbox annotations - num_downs = self.first_stage_model.encoder.num_resolutions - 1 - rescale_latent = 2 ** (num_downs) - - # get top-left positions of patches as expected by the bbox tokenizer; therefore we - # need to rescale the top-left patch coordinates to lie in (0, 1) - tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, - rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) - for patch_nr in range(z.shape[-1])] - - # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, w, h) - patch_limits = [(x_tl, y_tl, - rescale_latent * ks[0] / full_img_w, - rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] - # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] - - # tokenize crop coordinates for the bounding boxes of the respective patches - patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) - for bbox in patch_limits] # list of length l with tensors of shape (1, 2) - print(patch_limits_tknzd[0].shape) - # cut tknzd crop position from conditioning - assert isinstance(cond, dict), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) - print(cut_cond.shape) - - adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') - print(adapted_cond.shape) - adapted_cond = self.get_learned_conditioning(adapted_cond) - print(adapted_cond.shape) - adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) - print(adapted_cond.shape) - - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] - - else: - cond_list = [cond for i in range(z.shape[-1])] # TODO: make this more efficient - - # apply model by loop over crops - output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] - assert not isinstance(output_list[0], - tuple) # todo: can't deal with multiple model outputs; check this never happens - - o = torch.stack(output_list, 
axis=-1) - o = o * weighting - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - x_recon = fold(o) / normalization - - else: - # x_noisy is tensor with shape [b,c,mel_len,T] - # if condition is caption ,cond['c_crossattn'] is a list, each item shape is [1, 77, 1280] - x_recon = self.model(x_noisy, t, **cond)# tensor with shape [b,c,mel_len,T] - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == "x0": - target = x_start - elif self.parameterization == "eps": - target = noise - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += (self.original_elbo_weight * loss_vlb) - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, - return_x0=False, score_corrector=None, corrector_kwargs=None): - t_in = t - model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) - - if score_corrector is not None: - assert self.parameterization == "eps" - model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1., 1.) 
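- # optionally snap the denoised estimate onto the first stage's VQ codebook - # before the posterior q(x_{t-1} | x_t, x_0) is computed below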
- if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return model_mean, posterior_variance, posterior_log_variance, logits - elif return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, - return_codebook_ids=False, quantize_denoised=False, return_x0=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_codebook_ids: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, - img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., - score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, - log_every_t=None): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', - total=timesteps) if verbose else reversed( - range(0, timesteps)) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, return_x0=True, - temperature=temperature[i], noise_dropout=noise_dropout, - 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, - x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, start_T=None, - log_every_t=None): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( - range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, - verbose=True, timesteps=None, quantize_denoised=False, - mask=None, x0=None, shape=None,**kwargs): - if shape is None: - shape = (batch_size, self.channels, self.mel_dim, self.mel_length) - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - return self.p_sample_loop(cond, - shape, - return_intermediates=return_intermediates, x_T=x_T, - verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, - mask=mask, x0=x0) - - @torch.no_grad() - def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.mel_dim, self.mel_length) - samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, - shape,cond,verbose=False,**kwargs) - - else: - samples, intermediates = self.sample(cond=cond, batch_size=batch_size, - return_intermediates=True,**kwargs) - - return samples, intermediates - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, **kwargs): - - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, - return_first_stage_outputs=True, 
- force_c_encode=True, - return_original_cond=True, - bs=N) - - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x # original input image - log["reconstruction"] = xrec # first-stage reconstruction of the input - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): # when cond_stage is first_stage (the BERT embedder does not have a decode method) - xc = self.cond_stage_model.decode(c) # decoded masked image - log["conditioning"] = xc # decoded conditioning image - elif self.cond_stage_key in ["caption"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) - log["conditioning"] = xc # image rendering the caption text - if self.model.conditioning_key == 'hybrid': - log["decoded_maskedimg"] = self.first_stage_model.decode(c['c_concat'][:,:self.first_stage_model.embed_dim]) # c_concat concatenates the masked-image latent and the resized mask; take the latent part here to decode - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['conditioning'] = xc # image rendering the class label as text - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: # one image per logged diffusion step - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( - self.first_stage_model, IdentityFirstStage): - # also display when quantizing x0 while sampling - with self.ema_scope("Plotting Quantized Denoised"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta, - quantize_denoised=True) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_x0_quantized"] = x_samples - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
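- # mask == 1 keeps the corresponding x0 latents during masked sampling; the - # zeroed center square is the region the diffusion model regenerates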
- mask = mask[:, None, ...] # N,1,H,W - with self.ema_scope("Plotting Inpaint"): - samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_inpainting"] = x_samples - log["mask"] = mask - - # outpaint - mask = 1. - mask # invert the mask: keep the center, regenerate the border - with self.ema_scope("Plotting Outpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_outpainting"] = x_samples - - if plot_progressive_rows: - with self.ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.mel_dim, self.mel_length), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.cond_stage_trainable: - print(f"{self.__class__.__name__}: Also optimizing conditioner params!") - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - opt = torch.optim.AdamW(params, lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [opt], scheduler - return opt - - @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, "colorize"): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. - return x - diff --git a/spaces/Aditya9790/yolo7-object-tracking/utils/autoanchor.py b/spaces/Aditya9790/yolo7-object-tracking/utils/autoanchor.py deleted file mode 100644 index f491032e53ab43cd81d966d127bd92f9b414b9fe..0000000000000000000000000000000000000000 --- a/spaces/Aditya9790/yolo7-object-tracking/utils/autoanchor.py +++ /dev/null @@ -1,160 +0,0 @@ -# Auto-anchor utils - -import numpy as np -import torch -import yaml -from scipy.cluster.vq import kmeans -from tqdm import tqdm - -from utils.general import colorstr - - -def check_anchor_order(m): - # Check anchor order against stride order for YOLO Detect() module m, and correct if necessary - a = m.anchor_grid.prod(-1).view(-1) # anchor area - da = a[-1] - a[0] # delta a - ds = m.stride[-1] - m.stride[0] # delta s - if da.sign() != ds.sign(): # orders differ, so flip the anchors - print('Reversing anchor order') - m.anchors[:] = m.anchors.flip(0) - m.anchor_grid[:] = m.anchor_grid.flip(0) - - -def check_anchors(dataset, model, thr=4.0, imgsz=640): - # Check anchor fit to data, recompute if necessary - prefix = colorstr('autoanchor: ') - print(f'\n{prefix}Analyzing anchors... 
', end='') - m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() - shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) - scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale - wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh - - def metric(k): # compute metric - r = wh[:, None] / k[None] - x = torch.min(r, 1. / r).min(2)[0] # ratio metric - best = x.max(1)[0] # best_x - aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold - bpr = (best > 1. / thr).float().mean() # best possible recall - return bpr, aat - - anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors - bpr, aat = metric(anchors) - print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') - if bpr < 0.98: # threshold to recompute - print('. Attempting to improve anchors, please wait...') - na = m.anchor_grid.numel() // 2 # number of anchors - try: - anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - except Exception as e: - print(f'{prefix}ERROR: {e}') - new_bpr = metric(anchors)[0] - if new_bpr > bpr: # replace anchors - anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) - m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference - check_anchor_order(m) - m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss - print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') - else: - print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') - print('') # newline - - -def kmean_anchors(path='./data/coco.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): - """ Creates kmeans-evolved anchors from training dataset - - Arguments: - path: path to dataset *.yaml, or a loaded dataset - n: number of anchors - img_size: image size used for training - thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 - gen: generations to evolve anchors using genetic algorithm - verbose: print all results - - Return: - k: kmeans evolved anchors - - Usage: - from utils.autoanchor import *; _ = kmean_anchors() - """ - thr = 1. / thr - prefix = colorstr('autoanchor: ') - - def metric(k, wh): # compute metrics - r = wh[:, None] / k[None] - x = torch.min(r, 1. 
/ r).min(2)[0] # ratio metric - # x = wh_iou(wh, torch.tensor(k)) # iou metric - return x, x.max(1)[0] # x, best_x - - def anchor_fitness(k): # mutation fitness - _, best = metric(torch.tensor(k, dtype=torch.float32), wh) - return (best * (best > thr).float()).mean() # fitness - - def print_results(k): - k = k[np.argsort(k.prod(1))] # sort small to large - x, best = metric(k, wh0) - bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') - print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' - f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') - for i, x in enumerate(k): - print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg - return k - - if isinstance(path, str): # *.yaml file - with open(path) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict - from utils.datasets import LoadImagesAndLabels - dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) - else: - dataset = path # dataset - - # Get label wh - shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) - wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh - - # Filter - i = (wh0 < 3.0).any(1).sum() - if i: - print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') - wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels - # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - - # Kmeans calculation - print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') - s = wh.std(0) # sigmas for whitening - k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') - k *= s - wh = torch.tensor(wh, dtype=torch.float32) # filtered - wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered - k = print_results(k) - - # Plot - # k, d = [None] * 20, [None] * 20 - # for i in tqdm(range(1, 21)): - # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance - # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) - # ax = ax.ravel() - # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') - # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh - # ax[0].hist(wh[wh[:, 0]<100, 0],400) - # ax[1].hist(wh[wh[:, 1]<100, 1],400) - # fig.savefig('wh.png', dpi=200) - - # Evolve - npr = np.random - f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar - for _ in pbar: - v = np.ones(sh) - while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) - kg = (k.copy() * v).clip(min=2.0) - fg = anchor_fitness(kg) - if fg > f: - f, k = fg, kg.copy() - pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' - if verbose: - print_results(k) - - return print_results(k) diff --git a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/updater/base.py b/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/updater/base.py deleted file mode 100644 index 
abd21287557fffe78df2d016fc5219eebc236085..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/updater/base.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, List, Tuple - -from pydantic import BaseModel - -# from agentverse.agents import Agent -from abc import abstractmethod - -from . import updater_registry as UpdaterRegistry - -if TYPE_CHECKING: - from agentverse.environments import BaseEnvironment - - -@UpdaterRegistry.register("base") -class BaseUpdater(BaseModel): - """ - The base class for updaters. - """ - - @abstractmethod - def update_memory(self, environment: BaseEnvironment): - pass - - def reset(self): - pass diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetChildrenHeight.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetChildrenHeight.js deleted file mode 100644 index 6ce92c6e53ee65733b153aff51598dfb8e19dcd6..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetChildrenHeight.js +++ /dev/null @@ -1,6 +0,0 @@ -// Override -var GetChildrenHeight = function () { - return 0; -} - -export default GetChildrenHeight; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/ResolveWidth.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/ResolveWidth.js deleted file mode 100644 index b1e54851f803c0e29b8fdb502df966a02b602601..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/ResolveWidth.js +++ /dev/null @@ -1,16 +0,0 @@ -var ResolveWidth = function (width) { - if (width === undefined) { - width = Math.max(this.childrenWidth, this.minWidth); - } else { - /* - var minWidth = Math.max(this.childrenWidth, this.minWidth); - if (minWidth > width) { - // Warning - } - */ - } - - return width; -} - -export default ResolveWidth; \ No newline at end of file diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/vits/losses.py b/spaces/Akmyradov/TurkmenTTSweSTT/vits/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/Akmyradov/TurkmenTTSweSTT/vits/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = 
logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/docs/install.md b/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/docs/install.md deleted file mode 100644 index 6314a40441285e9236438e468caf8b71a407531a..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/docs/install.md +++ /dev/null @@ -1,51 +0,0 @@ -## v1.8.0 -### Linux and Windows -```shell -# CUDA 11.0 -pip --default-timeout=100 install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip --default-timeout=100 install torch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 - -# CPU only -pip --default-timeout=100 install torch==1.8.0+cpu torchvision==0.9.0+cpu torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html - -``` - - -## v1.7.1 -### Linux and Windows -```shell -# CUDA 11.0 -pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip install torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 - -# CUDA 10.1 -pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 9.2 -pip install torch==1.7.1+cu92 torchvision==0.8.2+cu92 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.7.1+cpu torchvision==0.8.2+cpu torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html -``` - - -## v1.6.0 - -### Linux and Windows -```shell -# CUDA 10.2 -pip install torch==1.6.0 torchvision==0.7.0 - -# CUDA 10.1 -pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 9.2 -pip install torch==1.6.0+cu92 torchvision==0.7.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html -``` \ No newline at end of file diff --git a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/shanghainese.py b/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/shanghainese.py deleted file mode 100644 index cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61..0000000000000000000000000000000000000000 --- a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/shanghainese.py +++ /dev/null @@ -1,64 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('zaonhe') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ᴇ'), - ('B', 'bi'), - ('C', 'si'), - ('D', 'di'), - ('E', 'i'), - ('F', 'ᴇf'), - ('G', 'dʑi'), - ('H', 'ᴇtɕʰ'), - ('I', 'ᴀi'), - ('J', 'dʑᴇ'), - ('K', 'kʰᴇ'), - ('L', 'ᴇl'), - ('M', 'ᴇm'), - ('N', 'ᴇn'), - ('O', 'o'), - ('P', 'pʰi'), - ('Q', 'kʰiu'), - ('R', 'ᴀl'), - ('S', 'ᴇs'), - ('T', 'tʰi'), - ('U', 'ɦiu'), - ('V', 'vi'), - ('W', 'dᴀbɤliu'), - ('X', 'ᴇks'), - ('Y', 'uᴀi'), - ('Z', 'zᴇ') -]] - - -def _number_to_shanghainese(num): - num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两') - return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num) - - -def number_to_shanghainese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) - - -def 
latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def shanghainese_to_ipa(text): - text = number_to_shanghainese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/Amrrs/portfolio-github/README.md b/spaces/Amrrs/portfolio-github/README.md deleted file mode 100644 index d8e4a0ac5fc94bcb0e76b818940eb91347fab5dd..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/portfolio-github/README.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Portfolio Github -emoji: 🌖 -colorFrom: blue -colorTo: green -sdk: static -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/consistency_models/__init__.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/consistency_models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py deleted file mode 100644 index 5efb61339cdbdde585f7814e9650be2e2df654ac..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './gcnet_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py deleted file mode 100644 index 52bc9f5e91f2fdf9ce8f9e3a873902dd8db56522..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x512_80k_ade20k.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_hr18.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict(decode_head=dict(num_classes=150)) diff --git a/spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-html/tippy.css b/spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-html/tippy.css deleted file mode 100644 index e6ae635cb1f82b176c18afa80dfa029c7a536e70..0000000000000000000000000000000000000000 --- a/spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-html/tippy.css +++ /dev/null @@ -1 +0,0 @@ -.tippy-box[data-animation=fade][data-state=hidden]{opacity:0}[data-tippy-root]{max-width:calc(100vw - 10px)}.tippy-box{position:relative;background-color:#333;color:#fff;border-radius:4px;font-size:14px;line-height:1.4;white-space:normal;outline:0;transition-property:transform,visibility,opacity}.tippy-box[data-placement^=top]>.tippy-arrow{bottom:0}.tippy-box[data-placement^=top]>.tippy-arrow:before{bottom:-7px;left:0;border-width:8px 8px 0;border-top-color:initial;transform-origin:center top}.tippy-box[data-placement^=bottom]>.tippy-arrow{top:0}.tippy-box[data-placement^=bottom]>.tippy-arrow:before{top:-7px;left:0;border-width:0 8px 8px;border-bottom-color:initial;transform-origin:center bottom}.tippy-box[data-placement^=left]>.tippy-arrow{right:0}.tippy-box[data-placement^=left]>.tippy-arrow:before{border-width:8px 0 8px 8px;border-left-color:initial;right:-7px;transform-origin:center left}.tippy-box[data-placement^=right]>.tippy-arrow{left:0}.tippy-box[data-placement^=right]>.tippy-arrow:before{left:-7px;border-width:8px 8px 8px 0;border-right-color:initial;transform-origin:center right}.tippy-box[data-inertia][data-state=visible]{transition-timing-function:cubic-bezier(.54,1.5,.38,1.11)}.tippy-arrow{width:16px;height:16px;color:#333}.tippy-arrow:before{content:"";position:absolute;border-color:transparent;border-style:solid}.tippy-content{position:relative;padding:5px 9px;z-index:1} \ No newline at end of file diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py 
b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py deleted file mode 100644 index 988d9adf2f289ef223bd1c680a5ae1d3387f0269..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..utils import kaiming_init -from .registry import PLUGIN_LAYERS - - -@PLUGIN_LAYERS.register_module() -class GeneralizedAttention(nn.Module): - """GeneralizedAttention module. - - See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' - (https://arxiv.org/abs/1904.05873) for details. - - Args: - in_channels (int): Channels of the input feature map. - spatial_range (int): The spatial range. -1 indicates no spatial range - constraint. Default: -1. - num_heads (int): The head number of empirical_attention module. - Default: 9. - position_embedding_dim (int): The position embedding dimension. - Default: -1. - position_magnitude (int): A multiplier acting on coord difference. - Default: 1. - kv_stride (int): The feature stride acting on key/value feature map. - Default: 2. - q_stride (int): The feature stride acting on query feature map. - Default: 1. - attention_type (str): A binary indicator string for indicating which - items in generalized empirical_attention module are used. - Default: '1111'. - - - '1000' indicates 'query and key content' (appr - appr) item, - - '0100' indicates 'query content and relative position' - (appr - position) item, - - '0010' indicates 'key content only' (bias - appr) item, - - '0001' indicates 'relative position only' (bias - position) item.
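- - For example, attention_type='0010' enables only the 'key content' - (saliency) term, for which forward() takes an accelerated code path.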
- """ - - _abbr_ = 'gen_attention_block' - - def __init__(self, - in_channels, - spatial_range=-1, - num_heads=9, - position_embedding_dim=-1, - position_magnitude=1, - kv_stride=2, - q_stride=1, - attention_type='1111'): - - super(GeneralizedAttention, self).__init__() - - # hard range means local range for non-local operation - self.position_embedding_dim = ( - position_embedding_dim - if position_embedding_dim > 0 else in_channels) - - self.position_magnitude = position_magnitude - self.num_heads = num_heads - self.in_channels = in_channels - self.spatial_range = spatial_range - self.kv_stride = kv_stride - self.q_stride = q_stride - self.attention_type = [bool(int(_)) for _ in attention_type] - self.qk_embed_dim = in_channels // num_heads - out_c = self.qk_embed_dim * num_heads - - if self.attention_type[0] or self.attention_type[1]: - self.query_conv = nn.Conv2d( - in_channels=in_channels, - out_channels=out_c, - kernel_size=1, - bias=False) - self.query_conv.kaiming_init = True - - if self.attention_type[0] or self.attention_type[2]: - self.key_conv = nn.Conv2d( - in_channels=in_channels, - out_channels=out_c, - kernel_size=1, - bias=False) - self.key_conv.kaiming_init = True - - self.v_dim = in_channels // num_heads - self.value_conv = nn.Conv2d( - in_channels=in_channels, - out_channels=self.v_dim * num_heads, - kernel_size=1, - bias=False) - self.value_conv.kaiming_init = True - - if self.attention_type[1] or self.attention_type[3]: - self.appr_geom_fc_x = nn.Linear( - self.position_embedding_dim // 2, out_c, bias=False) - self.appr_geom_fc_x.kaiming_init = True - - self.appr_geom_fc_y = nn.Linear( - self.position_embedding_dim // 2, out_c, bias=False) - self.appr_geom_fc_y.kaiming_init = True - - if self.attention_type[2]: - stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) - appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv - self.appr_bias = nn.Parameter(appr_bias_value) - - if self.attention_type[3]: - stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) - geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv - self.geom_bias = nn.Parameter(geom_bias_value) - - self.proj_conv = nn.Conv2d( - in_channels=self.v_dim * num_heads, - out_channels=in_channels, - kernel_size=1, - bias=True) - self.proj_conv.kaiming_init = True - self.gamma = nn.Parameter(torch.zeros(1)) - - if self.spatial_range >= 0: - # only works when non local is after 3*3 conv - if in_channels == 256: - max_len = 84 - elif in_channels == 512: - max_len = 42 - - max_len_kv = int((max_len - 1.0) / self.kv_stride + 1) - local_constraint_map = np.ones( - (max_len, max_len, max_len_kv, max_len_kv), dtype=np.int) - for iy in range(max_len): - for ix in range(max_len): - local_constraint_map[ - iy, ix, - max((iy - self.spatial_range) // - self.kv_stride, 0):min((iy + self.spatial_range + - 1) // self.kv_stride + - 1, max_len), - max((ix - self.spatial_range) // - self.kv_stride, 0):min((ix + self.spatial_range + - 1) // self.kv_stride + - 1, max_len)] = 0 - - self.local_constraint_map = nn.Parameter( - torch.from_numpy(local_constraint_map).byte(), - requires_grad=False) - - if self.q_stride > 1: - self.q_downsample = nn.AvgPool2d( - kernel_size=1, stride=self.q_stride) - else: - self.q_downsample = None - - if self.kv_stride > 1: - self.kv_downsample = nn.AvgPool2d( - kernel_size=1, stride=self.kv_stride) - else: - self.kv_downsample = None - - self.init_weights() - - def get_position_embedding(self, - h, - w, - h_kv, - w_kv, - q_stride, - kv_stride, - device, - dtype, - feat_dim, - wave_length=1000): - # the default type 
of Tensor is float32, leading to type mismatch - # in fp16 mode. Cast it to support fp16 mode. - h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype) - h_idxs = h_idxs.view((h, 1)) * q_stride - - w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype) - w_idxs = w_idxs.view((w, 1)) * q_stride - - h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to( - device=device, dtype=dtype) - h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride - - w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to( - device=device, dtype=dtype) - w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride - - # (h, h_kv, 1) - h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0) - h_diff *= self.position_magnitude - - # (w, w_kv, 1) - w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0) - w_diff *= self.position_magnitude - - feat_range = torch.arange(0, feat_dim / 4).to( - device=device, dtype=dtype) - - dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype) - dim_mat = dim_mat**((4. / feat_dim) * feat_range) - dim_mat = dim_mat.view((1, 1, -1)) - - embedding_x = torch.cat( - ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2) - - embedding_y = torch.cat( - ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2) - - return embedding_x, embedding_y - - def forward(self, x_input): - num_heads = self.num_heads - - # use empirical_attention - if self.q_downsample is not None: - x_q = self.q_downsample(x_input) - else: - x_q = x_input - n, _, h, w = x_q.shape - - if self.kv_downsample is not None: - x_kv = self.kv_downsample(x_input) - else: - x_kv = x_input - _, _, h_kv, w_kv = x_kv.shape - - if self.attention_type[0] or self.attention_type[1]: - proj_query = self.query_conv(x_q).view( - (n, num_heads, self.qk_embed_dim, h * w)) - proj_query = proj_query.permute(0, 1, 3, 2) - - if self.attention_type[0] or self.attention_type[2]: - proj_key = self.key_conv(x_kv).view( - (n, num_heads, self.qk_embed_dim, h_kv * w_kv)) - - if self.attention_type[1] or self.attention_type[3]: - position_embed_x, position_embed_y = self.get_position_embedding( - h, w, h_kv, w_kv, self.q_stride, self.kv_stride, - x_input.device, x_input.dtype, self.position_embedding_dim) - # (n, num_heads, w, w_kv, dim) - position_feat_x = self.appr_geom_fc_x(position_embed_x).\ - view(1, w, w_kv, num_heads, self.qk_embed_dim).\ - permute(0, 3, 1, 2, 4).\ - repeat(n, 1, 1, 1, 1) - - # (n, num_heads, h, h_kv, dim) - position_feat_y = self.appr_geom_fc_y(position_embed_y).\ - view(1, h, h_kv, num_heads, self.qk_embed_dim).\ - permute(0, 3, 1, 2, 4).\ - repeat(n, 1, 1, 1, 1) - - position_feat_x /= math.sqrt(2) - position_feat_y /= math.sqrt(2) - - # accelerate for saliency only - if (np.sum(self.attention_type) == 1) and self.attention_type[2]: - appr_bias = self.appr_bias.\ - view(1, num_heads, 1, self.qk_embed_dim).\ - repeat(n, 1, 1, 1) - - energy = torch.matmul(appr_bias, proj_key).\ - view(n, num_heads, 1, h_kv * w_kv) - - h = 1 - w = 1 - else: - # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for - if not self.attention_type[0]: - energy = torch.zeros( - n, - num_heads, - h, - w, - h_kv, - w_kv, - dtype=x_input.dtype, - device=x_input.device) - - # attention_type[0]: appr - appr - # attention_type[1]: appr - position - # attention_type[2]: bias - appr - # attention_type[3]: bias - position - if self.attention_type[0] or self.attention_type[2]: - if self.attention_type[0] and self.attention_type[2]: - appr_bias = self.appr_bias.\ - view(1, num_heads, 1, self.qk_embed_dim) - energy = torch.matmul(proj_query + 
appr_bias, proj_key).\ - view(n, num_heads, h, w, h_kv, w_kv) - - elif self.attention_type[0]: - energy = torch.matmul(proj_query, proj_key).\ - view(n, num_heads, h, w, h_kv, w_kv) - - elif self.attention_type[2]: - appr_bias = self.appr_bias.\ - view(1, num_heads, 1, self.qk_embed_dim).\ - repeat(n, 1, 1, 1) - - energy += torch.matmul(appr_bias, proj_key).\ - view(n, num_heads, 1, 1, h_kv, w_kv) - - if self.attention_type[1] or self.attention_type[3]: - if self.attention_type[1] and self.attention_type[3]: - geom_bias = self.geom_bias.\ - view(1, num_heads, 1, self.qk_embed_dim) - - proj_query_reshape = (proj_query + geom_bias).\ - view(n, num_heads, h, w, self.qk_embed_dim) - - energy_x = torch.matmul( - proj_query_reshape.permute(0, 1, 3, 2, 4), - position_feat_x.permute(0, 1, 2, 4, 3)) - energy_x = energy_x.\ - permute(0, 1, 3, 2, 4).unsqueeze(4) - - energy_y = torch.matmul( - proj_query_reshape, - position_feat_y.permute(0, 1, 2, 4, 3)) - energy_y = energy_y.unsqueeze(5) - - energy += energy_x + energy_y - - elif self.attention_type[1]: - proj_query_reshape = proj_query.\ - view(n, num_heads, h, w, self.qk_embed_dim) - proj_query_reshape = proj_query_reshape.\ - permute(0, 1, 3, 2, 4) - position_feat_x_reshape = position_feat_x.\ - permute(0, 1, 2, 4, 3) - position_feat_y_reshape = position_feat_y.\ - permute(0, 1, 2, 4, 3) - - energy_x = torch.matmul(proj_query_reshape, - position_feat_x_reshape) - energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4) - - energy_y = torch.matmul(proj_query_reshape, - position_feat_y_reshape) - energy_y = energy_y.unsqueeze(5) - - energy += energy_x + energy_y - - elif self.attention_type[3]: - geom_bias = self.geom_bias.\ - view(1, num_heads, self.qk_embed_dim, 1).\ - repeat(n, 1, 1, 1) - - position_feat_x_reshape = position_feat_x.\ - view(n, num_heads, w*w_kv, self.qk_embed_dim) - - position_feat_y_reshape = position_feat_y.\ - view(n, num_heads, h * h_kv, self.qk_embed_dim) - - energy_x = torch.matmul(position_feat_x_reshape, geom_bias) - energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv) - - energy_y = torch.matmul(position_feat_y_reshape, geom_bias) - energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1) - - energy += energy_x + energy_y - - energy = energy.view(n, num_heads, h * w, h_kv * w_kv) - - if self.spatial_range >= 0: - cur_local_constraint_map = \ - self.local_constraint_map[:h, :w, :h_kv, :w_kv].\ - contiguous().\ - view(1, 1, h*w, h_kv*w_kv) - - energy = energy.masked_fill_(cur_local_constraint_map, - float('-inf')) - - attention = F.softmax(energy, 3) - - proj_value = self.value_conv(x_kv) - proj_value_reshape = proj_value.\ - view((n, num_heads, self.v_dim, h_kv * w_kv)).\ - permute(0, 1, 3, 2) - - out = torch.matmul(attention, proj_value_reshape).\ - permute(0, 1, 3, 2).\ - contiguous().\ - view(n, self.v_dim * self.num_heads, h, w) - - out = self.proj_conv(out) - - # output is downsampled, upsample back to input size - if self.q_downsample is not None: - out = F.interpolate( - out, - size=x_input.shape[2:], - mode='bilinear', - align_corners=False) - - out = self.gamma * out + x_input - return out - - def init_weights(self): - for m in self.modules(): - if hasattr(m, 'kaiming_init') and m.kaiming_init: - kaiming_init( - m, - mode='fan_in', - nonlinearity='leaky_relu', - bias=0, - distribution='uniform', - a=1) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py deleted file 
mode 100644 index 1dcf146d8163aff1363e9764999b0a74d674a595..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json -import os -import os.path as osp - -import torch -import yaml - -import annotator.uniformer.mmcv as mmcv -from ....parallel.utils import is_module_wrapper -from ...dist_utils import master_only -from ..hook import HOOKS -from .base import LoggerHook - - -@HOOKS.register_module() -class PaviLoggerHook(LoggerHook): - - def __init__(self, - init_kwargs=None, - add_graph=False, - add_last_ckpt=False, - interval=10, - ignore_last=True, - reset_flag=False, - by_epoch=True, - img_key='img_info'): - super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag, - by_epoch) - self.init_kwargs = init_kwargs - self.add_graph = add_graph - self.add_last_ckpt = add_last_ckpt - self.img_key = img_key - - @master_only - def before_run(self, runner): - super(PaviLoggerHook, self).before_run(runner) - try: - from pavi import SummaryWriter - except ImportError: - raise ImportError('Please run "pip install pavi" to install pavi.') - - self.run_name = runner.work_dir.split('/')[-1] - - if not self.init_kwargs: - self.init_kwargs = dict() - self.init_kwargs['name'] = self.run_name - self.init_kwargs['model'] = runner._model_name - if runner.meta is not None: - if 'config_dict' in runner.meta: - config_dict = runner.meta['config_dict'] - assert isinstance( - config_dict, - dict), ('meta["config_dict"] has to be of a dict, ' - f'but got {type(config_dict)}') - elif 'config_file' in runner.meta: - config_file = runner.meta['config_file'] - config_dict = dict(mmcv.Config.fromfile(config_file)) - else: - config_dict = None - if config_dict is not None: - # 'max_.*iter' is parsed in pavi sdk as the maximum iterations - # to properly set up the progress bar. - config_dict = config_dict.copy() - config_dict.setdefault('max_iter', runner.max_iters) - # non-serializable values are first converted in - # mmcv.dump to json - config_dict = json.loads( - mmcv.dump(config_dict, file_format='json')) - session_text = yaml.dump(config_dict) - self.init_kwargs['session_text'] = session_text - self.writer = SummaryWriter(**self.init_kwargs) - - def get_step(self, runner): - """Get the total training step/epoch.""" - if self.get_mode(runner) == 'val' and self.by_epoch: - return self.get_epoch(runner) - else: - return self.get_iter(runner) - - @master_only - def log(self, runner): - tags = self.get_loggable_tags(runner, add_mode=False) - if tags: - self.writer.add_scalars( - self.get_mode(runner), tags, self.get_step(runner)) - - @master_only - def after_run(self, runner): - if self.add_last_ckpt: - ckpt_path = osp.join(runner.work_dir, 'latest.pth') - if osp.islink(ckpt_path): - ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path)) - - if osp.isfile(ckpt_path): - # runner.epoch += 1 has been done before `after_run`. 
- iteration = runner.epoch if self.by_epoch else runner.iter - return self.writer.add_snapshot_file( - tag=self.run_name, - snapshot_file_path=ckpt_path, - iteration=iteration) - - # flush the buffer and send a task ending signal to Pavi - self.writer.close() - - @master_only - def before_epoch(self, runner): - if runner.epoch == 0 and self.add_graph: - if is_module_wrapper(runner.model): - _model = runner.model.module - else: - _model = runner.model - device = next(_model.parameters()).device - data = next(iter(runner.data_loader)) - image = data[self.img_key][0:1].to(device) - with torch.no_grad(): - self.writer.add_graph(_model, image) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/_distutils_hack/override.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/_distutils_hack/override.py deleted file mode 100644 index 2cc433a4a55e3b41fa31089918fb62096092f89f..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/_distutils_hack/override.py +++ /dev/null @@ -1 +0,0 @@ -__import__('_distutils_hack').do_override() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/py38compat.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/py38compat.py deleted file mode 100644 index 59224e71e50c49e5f9f6f925837597c035a8ab7f..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/py38compat.py +++ /dev/null @@ -1,8 +0,0 @@ -def aix_platform(osname, version, release): - try: - import _aix_support - - return _aix_support.aix_platform() - except ImportError: - pass - return "{}-{}.{}".format(osname, version, release) diff --git a/spaces/AtomdffAI/wechatgpt4atom/channel/wechat/wechat_channel.py b/spaces/AtomdffAI/wechatgpt4atom/channel/wechat/wechat_channel.py deleted file mode 100644 index b800fc43753fad893a485eb214cc9602a7f69af9..0000000000000000000000000000000000000000 --- a/spaces/AtomdffAI/wechatgpt4atom/channel/wechat/wechat_channel.py +++ /dev/null @@ -1,176 +0,0 @@ -# encoding:utf-8 - -""" -wechat channel -""" -import itchat -import json -from itchat.content import * -from channel.channel import Channel -from concurrent.futures import ThreadPoolExecutor -from common.log import logger -from config import conf -import requests -import io - -thread_pool = ThreadPoolExecutor(max_workers=8) - - -class WechatChannel(Channel): - - qrcode = b'' - - newInstance = None - - def __init__(self): - pass - - def startup(self): - # log in by scanning the QR code - newInstance = itchat.load_sync_itchat() - self.newInstance = newInstance - - @newInstance.msg_register(TEXT) - def handler_single_msg(msg): - self.handle(msg) - return None - - @newInstance.msg_register(TEXT, isGroupChat=True) - def handler_group_msg(msg): - self.handle_group(msg) - return None - - newInstance.auto_login(qrCallback=self.qrCallback) - # start message listener - newInstance.run() - - def qrCallback(self, uuid, status, qrcode): - self.qrcode = qrcode - - def getQrCode(self): - return self.qrcode - - def handle(self, msg): - logger.debug("[WX]receive msg: " + json.dumps(msg, ensure_ascii=False)) - from_user_id = msg['FromUserName'] - to_user_id = msg['ToUserName'] # receiver id - other_user_id = msg['User']['UserName'] # id of the other party - content = msg['Text'] - match_prefix = self.check_prefix(content, conf().get('single_chat_prefix')) - if from_user_id == 
other_user_id and match_prefix is not None: - # 好友向自己发送消息 - if match_prefix != '': - str_list = content.split(match_prefix, 1) - if len(str_list) == 2: - content = str_list[1].strip() - - img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix')) - if img_match_prefix: - content = content.split(img_match_prefix, 1)[1].strip() - thread_pool.submit(self._do_send_img, content, from_user_id) - else: - thread_pool.submit(self._do_send, content, from_user_id) - - elif to_user_id == other_user_id and match_prefix: - # 自己给好友发送消息 - str_list = content.split(match_prefix, 1) - if len(str_list) == 2: - content = str_list[1].strip() - img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix')) - if img_match_prefix: - content = content.split(img_match_prefix, 1)[1].strip() - thread_pool.submit(self._do_send_img, content, to_user_id) - else: - thread_pool.submit(self._do_send, content, to_user_id) - - - def handle_group(self, msg): - logger.debug("[WX]receive group msg: " + json.dumps(msg, ensure_ascii=False)) - group_name = msg['User'].get('NickName', None) - group_id = msg['User'].get('UserName', None) - if not group_name: - return "" - origin_content = msg['Content'] - content = msg['Content'] - content_list = content.split(' ', 1) - context_special_list = content.split('\u2005', 1) - if len(context_special_list) == 2: - content = context_special_list[1] - elif len(content_list) == 2: - content = content_list[1] - - config = conf() - match_prefix = (msg['IsAt'] and not config.get("group_at_off", False)) or self.check_prefix(origin_content, config.get('group_chat_prefix')) \ - or self.check_contain(origin_content, config.get('group_chat_keyword')) - if ('ALL_GROUP' in config.get('group_name_white_list') or group_name in config.get('group_name_white_list') or self.check_contain(group_name, config.get('group_name_keyword_white_list'))) and match_prefix: - img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix')) - if img_match_prefix: - content = content.split(img_match_prefix, 1)[1].strip() - thread_pool.submit(self._do_send_img, content, group_id) - else: - thread_pool.submit(self._do_send_group, content, msg) - - def send(self, msg, receiver): - logger.info('[WX] sendMsg={}, receiver={}'.format(msg, receiver)) - self.newInstance.send(msg, toUserName=receiver) - - def _do_send(self, query, reply_user_id): - try: - if not query: - return - context = dict() - context['from_user_id'] = reply_user_id - reply_text = super().build_reply_content(query, context) - if reply_text: - self.send(conf().get("single_chat_reply_prefix") + reply_text, reply_user_id) - except Exception as e: - logger.exception(e) - - def _do_send_img(self, query, reply_user_id): - try: - if not query: - return - context = dict() - context['type'] = 'IMAGE_CREATE' - img_url = super().build_reply_content(query, context) - if not img_url: - return - - # 图片下载 - pic_res = requests.get(img_url, stream=True) - image_storage = io.BytesIO() - for block in pic_res.iter_content(1024): - image_storage.write(block) - image_storage.seek(0) - - # 图片发送 - logger.info('[WX] sendImage, receiver={}'.format(reply_user_id)) - self.newInstance.send_image(image_storage, reply_user_id) - except Exception as e: - logger.exception(e) - - def _do_send_group(self, query, msg): - if not query: - return - context = dict() - context['from_user_id'] = msg['ActualUserName'] - reply_text = super().build_reply_content(query, context) - if reply_text: - reply_text = '@' + msg['ActualNickName'] + ' ' + 
reply_text.strip() - self.send(conf().get("group_chat_reply_prefix", "") + reply_text, msg['User']['UserName']) - - - def check_prefix(self, content, prefix_list): - for prefix in prefix_list: - if content.startswith(prefix): - return prefix - return None - - - def check_contain(self, content, keyword_list): - if not keyword_list: - return None - for ky in keyword_list: - if content.find(ky) != -1: - return True - return None diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/README.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/README.md deleted file mode 100644 index 0eb44cc3b23beeb1755ab8d12002d26f13434235..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/datasets/README.md +++ /dev/null @@ -1,140 +0,0 @@ -# Use Builtin Datasets - -A dataset can be used by accessing [DatasetCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.DatasetCatalog) -for its data, or [MetadataCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.MetadataCatalog) for its metadata (class names, etc). -This document explains how to setup the builtin datasets so they can be used by the above APIs. -[Use Custom Datasets](https://detectron2.readthedocs.io/tutorials/datasets.html) gives a deeper dive on how to use `DatasetCatalog` and `MetadataCatalog`, -and how to add new datasets to them. - -Detectron2 has builtin support for a few datasets. -The datasets are assumed to exist in a directory specified by the environment variable -`DETECTRON2_DATASETS`. -Under this directory, detectron2 will look for datasets in the structure described below, if needed. -``` -$DETECTRON2_DATASETS/ - coco/ - lvis/ - cityscapes/ - VOC20{07,12}/ -``` - -You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`. -If left unset, the default is `./datasets` relative to your current working directory. - -The [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md) -contains configs and models that use these builtin datasets. - -## Expected dataset structure for [COCO instance/keypoint detection](https://cocodataset.org/#download): - -``` -coco/ - annotations/ - instances_{train,val}2017.json - person_keypoints_{train,val}2017.json - {train,val}2017/ - # image files that are mentioned in the corresponding json -``` - -You can use the 2014 version of the dataset as well. - -Some of the builtin tests (`dev/run_*_tests.sh`) uses a tiny version of the COCO dataset, -which you can download with `./datasets/prepare_for_tests.sh`. - -## Expected dataset structure for PanopticFPN: - -Extract panoptic annotations from [COCO website](https://cocodataset.org/#download) -into the following structure: -``` -coco/ - annotations/ - panoptic_{train,val}2017.json - panoptic_{train,val}2017/ # png annotations - panoptic_stuff_{train,val}2017/ # generated by the script mentioned below -``` - -Install panopticapi by: -``` -pip install git+https://github.com/cocodataset/panopticapi.git -``` -Then, run `python datasets/prepare_panoptic_fpn.py`, to extract semantic annotations from panoptic annotations. 
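Once a builtin dataset's files are in place, it can be queried through the two catalogs described at the top of this document. A minimal sketch (an illustration only, assuming detectron2 is installed and the COCO layout above has been prepared):

```python
# Query a builtin dataset through DatasetCatalog / MetadataCatalog.
# Assumes DETECTRON2_DATASETS points at the directory containing coco/.
from detectron2.data import DatasetCatalog, MetadataCatalog

dataset_dicts = DatasetCatalog.get("coco_2017_val")  # one dict per image
metadata = MetadataCatalog.get("coco_2017_val")      # class names and other metadata

print(len(dataset_dicts), "images")
print(metadata.thing_classes[:5])                    # first few category names
```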
- -## Expected dataset structure for [LVIS instance segmentation](https://www.lvisdataset.org/dataset): -``` -coco/ - {train,val,test}2017/ -lvis/ - lvis_v0.5_{train,val}.json - lvis_v0.5_image_info_test.json - lvis_v1_{train,val}.json - lvis_v1_image_info_test{,_challenge}.json -``` - -Install lvis-api by: -``` -pip install git+https://github.com/lvis-dataset/lvis-api.git -``` - -To evaluate models trained on the COCO dataset using LVIS annotations, -run `python datasets/prepare_cocofied_lvis.py` to prepare "cocofied" LVIS annotations. - -## Expected dataset structure for [cityscapes](https://www.cityscapes-dataset.com/downloads/): -``` -cityscapes/ - gtFine/ - train/ - aachen/ - color.png, instanceIds.png, labelIds.png, polygons.json, - labelTrainIds.png - ... - val/ - test/ - # below are generated Cityscapes panoptic annotation - cityscapes_panoptic_train.json - cityscapes_panoptic_train/ - cityscapes_panoptic_val.json - cityscapes_panoptic_val/ - cityscapes_panoptic_test.json - cityscapes_panoptic_test/ - leftImg8bit/ - train/ - val/ - test/ -``` -Install cityscapes scripts by: -``` -pip install git+https://github.com/mcordts/cityscapesScripts.git -``` - -Note: to create labelTrainIds.png, first prepare the above structure, then run cityscapesescript with: -``` -CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createTrainIdLabelImgs.py -``` -These files are not needed for instance segmentation. - -Note: to generate Cityscapes panoptic dataset, run cityscapesescript with: -``` -CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createPanopticImgs.py -``` -These files are not needed for semantic and instance segmentation. - -## Expected dataset structure for [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/index.html): -``` -VOC20{07,12}/ - Annotations/ - ImageSets/ - Main/ - trainval.txt - test.txt - # train.txt or val.txt, if you use these splits - JPEGImages/ -``` - -## Expected dataset structure for [ADE20k Scene Parsing](http://sceneparsing.csail.mit.edu/): -``` -ADEChallengeData2016/ - annotations/ - annotations_detectron2/ - images/ - objectInfo150.txt -``` -The directory `annotations_detectron2` is generated by running `python datasets/prepare_ade20k_sem_seg.py`. diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/__init__.py deleted file mode 100644 index 3f4e4df7645c67b7a013295207b98fe70b2e574c..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator -from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead - -__all__ = list(globals().keys()) diff --git a/spaces/Benson/text-generation/Examples/Ai Tipo De Teclado Ms Apk Completo Agrietado.md b/spaces/Benson/text-generation/Examples/Ai Tipo De Teclado Ms Apk Completo Agrietado.md deleted file mode 100644 index d3f05bc4566b4ea3407aced0e1b59c483a5efac7..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Ai Tipo De Teclado Ms Apk Completo Agrietado.md +++ /dev/null @@ -1,87 +0,0 @@ - -

What is APK JustPlay and how do you download it?

      -

If you are a gaming enthusiast who loves playing on your mobile device, you may have heard of APK JustPlay. But what exactly is it, and how can you download it? In this article we will answer these questions and more. We will explain what APK JustPlay is, what its benefits are, how to download it, and how to use it. By the end of this article you will have a clear idea of what APK JustPlay has to offer and how you can join the ultimate loyalty program for gamers.

      -

ai type keyboard plus apk full cracked


Download: https://bltlly.com/2v6M54



      -

Introduction

      -

What is APK JustPlay?

      -

APK JustPlay is a mobile app that rewards you with loyalty coins for playing games on your device. It is a unique loyalty program that offers you an exclusive collection of games you will not find anywhere else. You can earn loyalty coins for the time you spend playing these games, and then redeem them for real rewards or donate them to fantastic charities. APK JustPlay is developed by JustPlay GmbH, a mobile entertainment group based in Germany that creates apps for mobile phones and tablets.

      -

What are the benefits of APK JustPlay?

      -

APK JustPlay has many benefits for gamers who want to cash in on their passion for gaming. Here are some of them:

      - -

How to download APK JustPlay

      -

Step 1: Visit the official APK JustPlay website

      -

The first step to downloading APK JustPlay is to visit its official website at https://apkcombo.com/justplay/com.playjust.app/. There you will find all the information you need about the app, such as its features, screenshots, reviews and more. You will also see a button that says "Download APK (33 MB)". Click it to proceed to the next step.

      -

Step 2: Choose your device and download the APK file

      -

The next step is to choose your device and download the APK file. An APK file is an Android package file that contains all the files and code needed to install an app on your device. You will see a list of devices that are compatible with APK JustPlay, such as Android 7.0+, Android TV & Tablet, Windows PC, etc. Choose the one that matches your device and click it. You will then see a link that says "Download now". Click it and wait for the download to finish.
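As an extra safety step (our suggestion, not part of the original download instructions), you can verify the file before installing it by comparing its SHA-256 checksum against one published by the vendor. A minimal sketch in Python, where `justplay.apk` is a placeholder file name:

```python
# Compute the SHA-256 checksum of a downloaded APK to verify its integrity.
# "justplay.apk" is a placeholder; use the actual path of your download.
import hashlib

def sha256_of(path: str, chunk_size: int = 8192) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("justplay.apk"))  # compare against the vendor's published checksum
```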

      -

Step 3: Enable unknown sources on your device

      -

The third step is to enable unknown sources on your device. This is a security setting that allows you to install apps from sources other than the official Google Play Store. To enable unknown sources, go to your device settings and look for the security or privacy option. Then find the option that says "Allow installation of apps from unknown sources" or something similar. Turn it on and confirm your choice. This will let you install APK JustPlay on your device.

      -

Step 4: Install the APK file and launch the app

      - -

How to use APK JustPlay

      -

How to earn loyalty coins by playing

      -

Now that you have downloaded APK JustPlay, you may be wondering how to use it and earn loyalty coins by playing. It is very simple and fun. Here is how:

      -
        -
1. On the app's home screen you will see a list of games that are available to play. You can scroll through them and pick the ones that interest you.
2. Tap a game's icon and you will see a pop-up showing how many coins you can earn per minute by playing that game. You will also see a button that says "Play now". Tap it and the game will start loading.
3. Enjoy playing the game as long as you like. The more time you spend playing, the more coins you earn. You can see your coin balance in the top right corner of the screen.
4. When you are done playing, tap the back button and you will return to the app's home screen. You will see your updated coin balance and a message congratulating you on the coins you earned.

How to redeem your coins for real rewards or donate them to charities

      -

Once you have accumulated enough coins by playing, you can redeem them for real rewards or donate them to charities. Here is how:

      -
        -
1. Tap the menu icon in the top left corner of the screen and select "Rewards".
2. You will see a list of rewards available for you to choose from. You can filter them by category, such as cash, gift cards or donations.
3. Select the reward you want and tap it. You will see a pop-up showing how many coins you need to redeem that reward and a button that says "Redeem now". Tap it and confirm your choice.
4. You will receive an email with instructions on how to claim your reward or make your donation. Follow the instructions and enjoy your reward, or feel good about your donation.

How to discover new games and themes

      - -
        -
1. Tap the menu icon in the top left corner of the screen and select "Discover".
2. You will see a list of themes available for you to explore, such as action, adventure, puzzle, sports, etc.
3. Select the theme you want and tap it. You will see a list of games that belong to that theme.
4. Tap any game icon and you will see a pop-up showing a short description of the game and a button that says "Play now". Tap it and start playing.

Conclusion

      -

Summary of the main points

      -

In conclusion, APK JustPlay is an amazing app that rewards you with loyalty coins for playing games on your device. You can earn money daily by collecting loyalty points for playing games you enjoy. You can choose between PayPal withdrawals, gift cards or donations to support causes close to your heart. You can enjoy daily payouts every 3 hours, which gives you the flexibility to redeem your coins whenever you want. You can discover a unique collection of games that are exclusive to APK JustPlay, and enjoy a variety of genres and themes. You can make a difference by donating your earnings to your preferred charity, and APK JustPlay will match every dollar you give! You can join over 10 million satisfied players who are part of the JustPlay community and share your gaming experience with them.

      -

      -

Call to action and invitation to join the JustPlay community

      -

If you are ready to join the ultimate loyalty program for gamers, download APK JustPlay today and start earning loyalty coins by playing. It is free, easy and fun. You will not regret it. APK JustPlay is the best way to enjoy games and get rewarded for it. Do not miss this chance to join the JustPlay community and make a difference in the world. Download APK JustPlay now and start playing!

      -

Frequently asked questions

      -

Here are some frequently asked questions about APK JustPlay:

| Question | Answer |
| --- | --- |
| Is APK JustPlay safe and legal? | Yes, APK JustPlay is safe and legal. It is developed by a reputable company that follows all the rules and regulations of the Google Play Store. It contains no viruses, malware or spyware. It does not require any personal information or access to your device's data, and it does not interfere with your device's performance or battery life. It is a legitimate app that pays you to play. |
| How many games are available on APK JustPlay? | APK JustPlay has more than 100 games available to play. These games are exclusive to APK JustPlay and you will not find them anywhere else. They cover a wide range of genres and themes, such as action, adventure, puzzle, sports, etc. You will never get bored with APK JustPlay. |
| How much money can I make with APK JustPlay? | The amount of money you can make with APK JustPlay depends on how much time you spend playing and how many coins you earn. The more you play, the more you earn. You can earn up to $10 per day by playing games on APK JustPlay. You can also earn extra coins by inviting your friends to join the app, completing surveys, watching videos and entering contests. |
| What are the minimum requirements to use APK JustPlay? | To use APK JustPlay, you need a device running Android 7.0 or higher. You also need an internet connection and enough storage space on your device. You do not need a Google account or a credit card to use APK JustPlay. |
| How can I contact the APK JustPlay support team? | If you have any questions, problems or feedback about APK JustPlay, you can contact the support team by emailing support@justplay.app. You can also visit the APK JustPlay Facebook page for more information. |

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Clash Mini Para PC.md b/spaces/Benson/text-generation/Examples/Descargar Clash Mini Para PC.md deleted file mode 100644 index 54ccdaaaa7c556672089f392b37fd2b4ebdd49c6..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Clash Mini Para PC.md +++ /dev/null @@ -1,123 +0,0 @@ -
      -

Clash Mini: A fun and strategy-packed board game

      -

If you are a fan of the Clash Universe, you will love Clash Mini, a new game from Supercell that lets you duel in a fun board game. Collect, summon and upgrade your army of Minis, adorable versions of your favorite Clash characters, and watch them clash in exciting real-time battles. Predict your opponent's moves and assemble your winning strategy and formation. Lead your army with iconic heroes such as the Barbarian King, the Archer Queen, the Shield Maiden and more. Turn the tide of battle by swapping and upgrading your Minis between rounds. Play casually for fun or in ranked matches to climb the league standings. Clash Mini is easy to learn but hard to master. Get your Minis ready to start the biggest rumble!

      -

download clash mini for PC


      Download ✑ ✑ ✑ https://bltlly.com/2v6KXh



      -

But what if you want to play Clash Mini on a bigger screen, with better controls and more performance? Well, you can do that by playing Clash Mini on your PC. In this article we will show you how to download and install Clash Mini on your PC, how to play it, and some tips and tricks to help you win more games.

      -

How to download and install Clash Mini on your PC

      -

There are two main ways to play Clash Mini on your PC. One is to use Windows 11 and native Android emulation, which is the official way to run Android apps on Windows. The other is to use an Android emulator such as Bluestacks 5, which is third-party software that simulates an Android device on your PC. Both methods have their pros and cons, so you can choose the one that suits you best.

      -

Option 1: Use Windows 11 and native Android emulation

      - -

To use this feature, you need a Windows 11 computer that meets the minimum requirements for running Android apps. You also need a Microsoft account and an Amazon account. Then you need to follow these steps:

      -

      -
        -
1. Open the Microsoft Store app on your PC and search for "Windows Subsystem for Android". Install it on your PC.
2. Open the Microsoft Store app again and search for "Amazon Appstore". Install it on your PC.
3. Open the Amazon Appstore app on your PC and sign in with your Amazon account.
4. Search for "Clash Mini" in the Amazon Appstore app and install it on your PC.
5. Open the Start menu on your PC and search for "Clash Mini". Click it to launch the game.

Congratulations, you have installed and run Clash Mini on your PC using Windows 11 and native Android emulation. You can now enjoy the game on a bigger screen, with better graphics and faster performance. You can also use your mouse and keyboard or a controller to play the game.

      -

Option 2: Use an Android emulator such as Bluestacks 5

      -

If you do not have a Windows 11 computer, or if you prefer a different method, you can use an Android emulator such as Bluestacks 5 to play Clash Mini on your PC. An Android emulator is software that simulates an Android device on your PC, allowing you to run Android apps and games there. Bluestacks 5 is one of the most popular and reliable Android emulators, with more than 500 million users worldwide. It offers high performance, compatibility, customization and security for playing Android games on PC.

      -

To use this method, you need a PC that meets the minimum requirements for running Bluestacks 5. You also need a Google account. Then you need to follow these steps:

      -
        -
1. Go to the official Bluestacks 5 website and download the installer for your PC.
2. Open Bluestacks 5 and sign in with your Google account.
3. Go to the Google Play Store app inside Bluestacks 5 and search for "Clash Mini". Install it in Bluestacks 5.
4. Go to the Bluestacks 5 home screen and find "Clash Mini". Click it to launch the game.

Congratulations, you have successfully installed and run Clash Mini on your PC using Bluestacks 5. You can now enjoy the game on a bigger screen, with better graphics and faster performance. You can also use your mouse and keyboard or a controller to play the game.

      -

How to play Clash Mini on your PC

      -

Now that you have installed Clash Mini on your PC, you may be wondering how to play it. Well, don't worry, we have you covered. Here are some basic steps and tips on how to play Clash Mini on your PC:

      -

Choose your Minis and Heroes

      -

The first thing you need to do is choose your army of Minis and Heroes. Minis are cute versions of Clash characters that have different abilities and roles in battle. Heroes are powerful leaders that can boost your Minis and unleash special abilities. You can collect Minis and Heroes by opening chests, completing missions, or buying them with gems. You can also upgrade them with gold and cards.

      -

You can have up to eight Minis and one Hero in your army. You can customize your army according to your preference and strategy, and you can create different decks for different modes and situations. To choose your Minis and Heroes, go to the Army tab in the main menu and drag and drop them into the slots. You can also tap them to see their stats and abilities.

      -

Arrange your army on the board

      -

The next thing you need to do is arrange your army on the board. The board is where battles take place. It has nine tiles for each player, where you can place your Minis. The board also has obstacles that can block or affect your Minis' movements and attacks.

      - -

Upgrade your Minis during battle

      -

The third thing you need to do is upgrade your Minis during battle. Upgrading your Minis can make them stronger, faster or more durable. It can also unlock new abilities or effects for them. Upgrading your Minis can give you an edge over your opponent in battle.

      -

You can upgrade your Minis during battle using the gold you earn by defeating enemy Minis or from chests. You can upgrade up to three times per round, but each upgrade costs more gold than the previous one. To upgrade your Minis during battle, tap the upgrade button at the bottom of the screen and select the Mini you want to upgrade.

      -

Use the mouse and keyboard or a controller

      -

The last thing you need to do is use the mouse and keyboard or a controller to play. Playing Clash Mini on your PC gives you the advantage of better controls and precision than playing on a mobile device. You can use the mouse and keyboard or a controller to interact with the game and perform various actions.

      -

You can use the mouse to drag and drop your Minis onto the board, to click buttons and menus, and to scroll and zoom in and out. You can use the keyboard for shortcuts and hotkeys for faster and easier gameplay. You can also use a controller to play the game, as long as it is compatible with your PC and the game. You can customize your controls and settings in the game's Options menu.

      -

Tips and tricks for playing Clash Mini on your PC

      -

Now that you know how to play Clash Mini on your PC, you may be looking for some tips and tricks to improve your skills and win more games. Well, don't worry, we have you covered. Here are some tips and tricks for playing Clash Mini on your PC:

      -

Anticipate your opponent's moves

      - -

      Por ejemplo, si ves que tu oponente tiene un montón de Minis a distancia, es posible que quieras colocar algunos Minis tanky delante de ellos para bloquear sus disparos. Si ves que tu oponente tiene un héroe que puede curar a sus Minis, es posible que desee centrarse en sacar a ese héroe primero. Si ves que tu oponente tiene un Mini que puede aturdir o congelar tus Minis, es posible que desees extender tus Minis o usar un Mini que pueda limpiarlos o inmunizarlos.

      -

Adjust your strategy according to the mode

      -

Another important skill in Clash Mini is adjusting your strategy to the mode you are playing. There are different modes in Clash Mini, such as Casual, Ranked, Friendly, and Special Events. Each mode has different rules, objectives, rewards and challenges. You need to adapt your strategy to the mode you are playing and the situation you are facing.

      -

For example, in Casual mode you can play for fun and experiment with different Minis and Heroes without worrying about losing trophies or ranks. In Ranked mode you need to play more seriously and competitively to climb the leagues and earn rewards. In Friendly mode you can play with or against your friends or clanmates for fun or practice. In Special Events mode you can play with unique rules or modifiers that change the gameplay.

      -

Experiment with different combinations and abilities

      -

One of the most fun aspects of Clash Mini is experimenting with different combinations and abilities of Minis and Heroes. There are many Minis and Heroes in Clash Mini, each with their own unique abilities and roles. You can mix and match them to create different synergies and effects. You can also upgrade or swap them during battle to change their abilities or effects.

      - -

Sync your progress across devices

      -

One of the most convenient features of Clash Mini is that you can sync your progress across devices. This means you can play the game on your PC or mobile device without losing any data or progress. You can switch devices at any time without problems.

      -

To sync your progress across devices, you need to link your game account with Google Play Games (for Android devices) or Game Center (for iOS devices). You also need an internet connection when you switch devices. To link your game account with Google Play Games or Game Center, go to the game's Settings menu and tap the Link button.

      -

Conclusion

      -

Clash Mini is a fun and strategy-packed board game that lets you duel in the Clash Universe. You can collect, summon and upgrade your army of Minis and Heroes, watch them clash in exciting real-time battles, predict your opponent's moves and assemble your winning strategy and formation. You can play casually for fun or in ranked matches to climb the league standings.

      -

But what if you want to play Clash Mini on a bigger screen, with better controls and more performance? Well, you can do that by playing Clash Mini on your PC. You can use Windows 11 and native Android emulation, which is the official way to run Android apps on Windows. Or you can use an Android emulator such as Bluestacks 5, which is third-party software that simulates an Android device on your PC. Both methods have their pros and cons, so you can choose the one that suits you best.

      -

Playing Clash Mini on your PC gives you the advantage of better graphics, faster performance and more precision than playing on a mobile device. You can also use the mouse and keyboard or a controller to play the game. And you can sync your progress across devices, so you can switch between your PC and your mobile device whenever you want.

      - -

So what are you waiting for? Download Clash Mini on your PC today and enjoy the fun, strategy-packed board game. You will love it!

      -

Frequently asked questions

      -

What are the minimum requirements to run Clash Mini on PC?

      -

To run Clash Mini on PC using Windows 11 and native Android emulation, you need a Windows 11 computer that meets these minimum requirements:

      - -

To run Clash Mini on PC using Bluestacks 5, you need a PC that meets these minimum requirements:

      - -

How can I get Google Play Games on Windows 11?

      -

If you want to use Google Play Games on Windows 11, you need to install it separately from the Amazon Appstore. To do so, follow these steps (a minimal command-line sketch follows the list):

      -
        -
1. Open the Windows Subsystem for Android app on your PC and go to the Developer options tab.
2. Enable developer mode and ADB debugging.
3. Download the Google Play Games APK file from a trusted source.
4. Connect your PC to your Android device using a USB cable.
5. Open a Command Prompt window on your PC and type `adb devices` to check whether your device is detected.
6. Open the Google Play Games app on your PC and sign in with your Google account.
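The sketch below shows how the adb steps above could be driven from a script. It is only an illustration: it assumes the Android platform-tools (`adb`) are installed and on your PATH, and `google_play_games.apk` is a placeholder name for the APK downloaded in step 3:

```python
# Run the adb commands from the list above via Python's subprocess module.
# Assumes adb (Android platform-tools) is on PATH; the APK name is a placeholder.
import subprocess

def adb(*args: str) -> str:
    """Run an adb command and return its stdout, raising on failure."""
    result = subprocess.run(["adb", *args], capture_output=True, text=True, check=True)
    return result.stdout

print(adb("devices"))                           # step 5: confirm the device is detected
print(adb("install", "google_play_games.apk"))  # sideload the downloaded APK
```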

What are the best Minis and Heroes to use in Clash Mini?

      -

The answer to this question depends on your personal preference and strategy. However, some general tips are:

      - -

How can I earn rewards and points in Clash Mini?

      -

You can earn rewards and points in Clash Mini by doing various in-game activities, such as:

      - -

How can I find friends and chat with other players in Clash Mini?

      -

You can find friends and chat with other players in Clash Mini using the game's social features, such as:

      -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/__init__.py deleted file mode 100644 index b22f7abb93b9d7aeee50829b35746aaa3f9f5feb..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/__init__.py +++ /dev/null @@ -1,120 +0,0 @@ -""" -pip._vendor is for vendoring dependencies of pip to prevent needing pip to -depend on something external. - -Files inside of pip._vendor should be considered immutable and should only be -updated to versions from upstream. -""" -from __future__ import absolute_import - -import glob -import os.path -import sys - -# Downstream redistributors which have debundled our dependencies should also -# patch this value to be true. This will trigger the additional patching -# to cause things like "six" to be available as pip. -DEBUNDLED = False - -# By default, look in this directory for a bunch of .whl files which we will -# add to the beginning of sys.path before attempting to import anything. This -# is done to support downstream re-distributors like Debian and Fedora who -# wish to create their own Wheels for our dependencies to aid in debundling. -WHEEL_DIR = os.path.abspath(os.path.dirname(__file__)) - - -# Define a small helper function to alias our vendored modules to the real ones -# if the vendored ones do not exist. This idea of this was taken from -# https://github.com/kennethreitz/requests/pull/2567. -def vendored(modulename): - vendored_name = "{0}.{1}".format(__name__, modulename) - - try: - __import__(modulename, globals(), locals(), level=0) - except ImportError: - # We can just silently allow import failures to pass here. If we - # got to this point it means that ``import pip._vendor.whatever`` - # failed and so did ``import whatever``. Since we're importing this - # upfront in an attempt to alias imports, not erroring here will - # just mean we get a regular import error whenever pip *actually* - # tries to import one of these modules to use it, which actually - # gives us a better error message than we would have otherwise - # gotten. - pass - else: - sys.modules[vendored_name] = sys.modules[modulename] - base, head = vendored_name.rsplit(".", 1) - setattr(sys.modules[base], head, sys.modules[modulename]) - - -# If we're operating in a debundled setup, then we want to go ahead and trigger -# the aliasing of our vendored libraries as well as looking for wheels to add -# to our sys.path. This will cause all of this code to be a no-op typically -# however downstream redistributors can enable it in a consistent way across -# all platforms. -if DEBUNDLED: - # Actually look inside of WHEEL_DIR to find .whl files and add them to the - # front of our sys.path. - sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path - - # Actually alias all of our vendored dependencies. 
- vendored("cachecontrol") - vendored("certifi") - vendored("colorama") - vendored("distlib") - vendored("distro") - vendored("six") - vendored("six.moves") - vendored("six.moves.urllib") - vendored("six.moves.urllib.parse") - vendored("packaging") - vendored("packaging.version") - vendored("packaging.specifiers") - vendored("pep517") - vendored("pkg_resources") - vendored("platformdirs") - vendored("progress") - vendored("requests") - vendored("requests.exceptions") - vendored("requests.packages") - vendored("requests.packages.urllib3") - vendored("requests.packages.urllib3._collections") - vendored("requests.packages.urllib3.connection") - vendored("requests.packages.urllib3.connectionpool") - vendored("requests.packages.urllib3.contrib") - vendored("requests.packages.urllib3.contrib.ntlmpool") - vendored("requests.packages.urllib3.contrib.pyopenssl") - vendored("requests.packages.urllib3.exceptions") - vendored("requests.packages.urllib3.fields") - vendored("requests.packages.urllib3.filepost") - vendored("requests.packages.urllib3.packages") - vendored("requests.packages.urllib3.packages.ordered_dict") - vendored("requests.packages.urllib3.packages.six") - vendored("requests.packages.urllib3.packages.ssl_match_hostname") - vendored("requests.packages.urllib3.packages.ssl_match_hostname." - "_implementation") - vendored("requests.packages.urllib3.poolmanager") - vendored("requests.packages.urllib3.request") - vendored("requests.packages.urllib3.response") - vendored("requests.packages.urllib3.util") - vendored("requests.packages.urllib3.util.connection") - vendored("requests.packages.urllib3.util.request") - vendored("requests.packages.urllib3.util.response") - vendored("requests.packages.urllib3.util.retry") - vendored("requests.packages.urllib3.util.ssl_") - vendored("requests.packages.urllib3.util.timeout") - vendored("requests.packages.urllib3.util.url") - vendored("resolvelib") - vendored("rich") - vendored("rich.console") - vendored("rich.highlighter") - vendored("rich.logging") - vendored("rich.markup") - vendored("rich.progress") - vendored("rich.segment") - vendored("rich.style") - vendored("rich.text") - vendored("rich.traceback") - vendored("tenacity") - vendored("tomli") - vendored("urllib3") diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py deleted file mode 100644 index bb2cafa18011e7115773055338291c366f173d6f..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_emoji_replace.py +++ /dev/null @@ -1,32 +0,0 @@ -from typing import Callable, Match, Optional -import re - -from ._emoji_codes import EMOJI - - -_ReStringMatch = Match[str] # regex match object -_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub -_EmojiSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re - - -def _emoji_replace( - text: str, - default_variant: Optional[str] = None, - _emoji_sub: _EmojiSubMethod = re.compile(r"(:(\S*?)(?:(?:\-)(emoji|text))?:)").sub, -) -> str: - """Replace emoji code in text.""" - get_emoji = EMOJI.__getitem__ - variants = {"text": "\uFE0E", "emoji": "\uFE0F"} - get_variant = variants.get - default_variant_code = variants.get(default_variant, "") if default_variant else "" - - def do_replace(match: Match[str]) -> str: - emoji_code, emoji_name, variant = match.groups() - try: - return get_emoji(emoji_name.lower()) + get_variant( - variant, default_variant_code 
- ) - except KeyError: - return emoji_code - - return _emoji_sub(do_replace, text) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/before_sleep.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/before_sleep.py deleted file mode 100644 index 8c6167fb3a6b390c5c0a3ba455f76cedf34695f2..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/before_sleep.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2016 Julien Danjou -# Copyright 2016 Joshua Harlow -# Copyright 2013-2014 Ray Holder -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import typing - -from pip._vendor.tenacity import _utils - -if typing.TYPE_CHECKING: - import logging - - from pip._vendor.tenacity import RetryCallState - - -def before_sleep_nothing(retry_state: "RetryCallState") -> None: - """Before call strategy that does nothing.""" - - -def before_sleep_log( - logger: "logging.Logger", - log_level: int, - exc_info: bool = False, -) -> typing.Callable[["RetryCallState"], None]: - """Before call strategy that logs to some logger the attempt.""" - - def log_it(retry_state: "RetryCallState") -> None: - local_exc_info: BaseException | bool | None - - if retry_state.outcome is None: - raise RuntimeError("log_it() called before outcome was set") - - if retry_state.next_action is None: - raise RuntimeError("log_it() called before next_action was set") - - if retry_state.outcome.failed: - ex = retry_state.outcome.exception() - verb, value = "raised", f"{ex.__class__.__name__}: {ex}" - - if exc_info: - local_exc_info = retry_state.outcome.exception() - else: - local_exc_info = False - else: - verb, value = "returned", retry_state.outcome.result() - local_exc_info = False # exc_info does not apply when no exception - - if retry_state.fn is None: - # NOTE(sileht): can't really happen, but we must please mypy - fn_name = "" - else: - fn_name = _utils.get_callback_name(retry_state.fn) - - logger.log( - log_level, - f"Retrying {fn_name} " f"in {retry_state.next_action.sleep} seconds as it {verb} {value}.", - exc_info=local_exc_info, - ) - - return log_it diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/errors.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/errors.py deleted file mode 100644 index ec7fb3b6c4856708dc6bc3b0c35fd8df73156029..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/errors.py +++ /dev/null @@ -1,58 +0,0 @@ -"""setuptools.errors - -Provides exceptions used by setuptools modules. 
-""" - -from distutils import errors as _distutils_errors - - -# Re-export errors from distutils to facilitate the migration to PEP632 - -ByteCompileError = _distutils_errors.DistutilsByteCompileError -CCompilerError = _distutils_errors.CCompilerError -ClassError = _distutils_errors.DistutilsClassError -CompileError = _distutils_errors.CompileError -ExecError = _distutils_errors.DistutilsExecError -FileError = _distutils_errors.DistutilsFileError -InternalError = _distutils_errors.DistutilsInternalError -LibError = _distutils_errors.LibError -LinkError = _distutils_errors.LinkError -ModuleError = _distutils_errors.DistutilsModuleError -OptionError = _distutils_errors.DistutilsOptionError -PlatformError = _distutils_errors.DistutilsPlatformError -PreprocessError = _distutils_errors.PreprocessError -SetupError = _distutils_errors.DistutilsSetupError -TemplateError = _distutils_errors.DistutilsTemplateError -UnknownFileError = _distutils_errors.UnknownFileError - -# The root error class in the hierarchy -BaseError = _distutils_errors.DistutilsError - - -class RemovedCommandError(BaseError, RuntimeError): - """Error used for commands that have been removed in setuptools. - - Since ``setuptools`` is built on ``distutils``, simply removing a command - from ``setuptools`` will make the behavior fall back to ``distutils``; this - error is raised if a command exists in ``distutils`` but has been actively - removed in ``setuptools``. - """ - - -class PackageDiscoveryError(BaseError, RuntimeError): - """Impossible to perform automatic discovery of packages and/or modules. - - The current project layout or given discovery options can lead to problems when - scanning the project directory. - - Setuptools might also refuse to complete auto-discovery if an error prone condition - is detected (e.g. when a project is organised as a flat-layout but contains - multiple directories that can be taken as top-level packages inside a single - distribution [*]_). In these situations the users are encouraged to be explicit - about which packages to include or to make the discovery parameters more specific. - - .. [*] Since multi-package distributions are uncommon it is very likely that the - developers did not intend for all the directories to be packaged, and are just - leaving auxiliary code in the repository top-level, such as maintenance-related - scripts. - """ diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/_appengine_environ.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/_appengine_environ.py deleted file mode 100644 index 8765b907d70c4a530bc90dc88f24b3df73473b01..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/_appengine_environ.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -This module provides means to detect the App Engine environment. -""" - -import os - - -def is_appengine(): - return is_local_appengine() or is_prod_appengine() - - -def is_appengine_sandbox(): - """Reports if the app is running in the first generation sandbox. - - The second generation runtimes are technically still in a sandbox, but it - is much less restrictive, so generally you shouldn't need to check for it. 
- see https://cloud.google.com/appengine/docs/standard/runtimes - """ - return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27" - - -def is_local_appengine(): - return "APPENGINE_RUNTIME" in os.environ and os.environ.get( - "SERVER_SOFTWARE", "" - ).startswith("Development/") - - -def is_prod_appengine(): - return "APPENGINE_RUNTIME" in os.environ and os.environ.get( - "SERVER_SOFTWARE", "" - ).startswith("Google App Engine/") - - -def is_prod_appengine_mvms(): - """Deprecated.""" - return False diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/INSTALL.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/INSTALL.md deleted file mode 100644 index df998f771ad9750a69b8adaeab27e7c12df9d907..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/INSTALL.md +++ /dev/null @@ -1,175 +0,0 @@ -## Installation - -Our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) -has step-by-step instructions that install detectron2. -The [Dockerfile](https://github.com/facebookresearch/detectron2/blob/master/docker/Dockerfile) -also installs detectron2 with a few simple commands. - -### Requirements -- Linux or macOS with Python ≥ 3.6 -- PyTorch ≥ 1.3 -- [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation. - You can install them together at [pytorch.org](https://pytorch.org) to make sure of this. -- OpenCV, optional, needed by demo and visualization -- pycocotools: `pip install cython; pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'` - - -### Build Detectron2 from Source - -After having the above dependencies and gcc & g++ ≥ 5, run: -``` -python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' -# (add --user if you don't have permission) - -# Or, to install it from a local clone: -git clone https://github.com/facebookresearch/detectron2.git -cd detectron2 && python -m pip install -e . - -# Or if you are on macOS -# CC=clang CXX=clang++ python -m pip install -e . -``` - -To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the -old build first. You often need to rebuild detectron2 after reinstalling PyTorch. - -### Install Pre-Built Detectron2 -``` -# for CUDA 10.1: -python -m pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/index.html -``` -You can replace cu101 with "cu{100,92}" or "cpu". - -Note that: -1. Such installation has to be used with the latest official PyTorch release (currently 1.4). - It will not work with your custom build of PyTorch. -2. Such installation is out-of-date w.r.t. master branch of detectron2. It may not be - compatible with the master branch of a research project that uses detectron2 (e.g. those in - [projects](./projects) or [meshrcnn](https://github.com/facebookresearch/meshrcnn/)). - -### Common Installation Issues - -If you met issues using the pre-built detectron2, please uninstall it and try building it from source. - -Click each issue for its solutions: - -
      - -Undefined torch/aten/caffe2 symbols, or segmentation fault immediately when running the library. - -
      - -This can happen if detectron2 or torchvision is not -compiled with the version of PyTorch you're running. - -If you use a pre-built torchvision, uninstall torchvision & pytorch, and reinstall them -following [pytorch.org](http://pytorch.org). -If you manually build detectron2 or torchvision, remove the files you built (`build/`, `**/*.so`) -and rebuild them. - -If you cannot resolve the problem, please include the output of `gdb -ex "r" -ex "bt" -ex "quit" --args python -m detectron2.utils.collect_env` -in your issue. -
      - -
      - -Undefined C++ symbols (e.g. `GLIBCXX`) or C++ symbols not found. - -
      -Usually it's because the library is compiled with a newer C++ compiler but run with an old C++ runtime. - -This often happens with old anaconda. -Try `conda update libgcc`. Then rebuild detectron2. - -The fundamental solution is to run the code with sufficiently new C++ runtime -using `LD_PRELOAD=/path/to/libstdc++.so` - -
      - -
      - -"Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available". - -
      -CUDA is not found when building detectron2. -You should make sure - -``` -python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)' -``` - -print valid outputs at the time you build detectron2. - -Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config. -
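A minimal sketch of the CPU fallback mentioned above, using a model-zoo config as an example (any config works the same way):

```python
# Force CPU inference when no usable CUDA build is available.
from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.DEVICE = "cpu"  # run inference without a GPU
```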
      - -
      - -"invalid device function" or "no kernel image is available for execution". - -
      -Two possibilities: - -* You build detectron2 with one version of CUDA but run it with a different version. - - To check whether it is the case, - use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. - In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" - to contain cuda libraries of the same version. - - When they are inconsistent, - you need to either install a different build of PyTorch (or build by yourself) - to match your local CUDA installation, or install a different version of CUDA to match PyTorch. - -* Detectron2 or PyTorch/torchvision is not built for the correct GPU architecture (compute compatibility). - - The GPU architecture for PyTorch/detectron2/torchvision is available in the "architecture flags" in - `python -m detectron2.utils.collect_env`. - - The GPU architecture flags of detectron2/torchvision by default matches the GPU model detected - during building. This means the compiled code may not work on a different GPU model. - To overwrite the GPU architecture for detectron2/torchvision, use `TORCH_CUDA_ARCH_LIST` environment variable during building. - - For example, `export TORCH_CUDA_ARCH_LIST=6.0,7.0` makes it work for both P100s and V100s. - Visit [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus) to find out - the correct compute compatibility number for your device. - -
      - -
      - -Undefined CUDA symbols or cannot open libcudart.so. - -
      -The version of NVCC you use to build detectron2 or torchvision does -not match the version of CUDA you are running with. -This often happens when using anaconda's CUDA runtime. - -Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions. -In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA" -to contain cuda libraries of the same version. - -When they are inconsistent, -you need to either install a different build of PyTorch (or build by yourself) -to match your local CUDA installation, or install a different version of CUDA to match PyTorch. -
      - - -
      - -"ImportError: cannot import name '_C'". - -
      -Please build and install detectron2 following the instructions above. - -If you are running code from detectron2's root directory, `cd` to a different one. -Otherwise you may not import the code that you installed. -
      - -
      - -ONNX conversion segfault after some "TraceWarning". - -
      -Build and install ONNX from its source code using a compiler -whose version is closer to what's used by PyTorch (available in `torch.__config__.show()`). -
      diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/utils/optim.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/utils/optim.py deleted file mode 100644 index e647d9b770e299784dab0667e356698c4c471433..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/utils/optim.py +++ /dev/null @@ -1,73 +0,0 @@ -# -------------------------------------------------------- -# OpenVQA -# Written by Yuhao Cui https://github.com/cuiyuhao1996 -# -------------------------------------------------------- - -import torch.optim as Optim - - -class WarmupOptimizer(object): - def __init__(self, lr_base, optimizer, data_size, batch_size, warmup_epoch): - self.optimizer = optimizer - self._step = 0 - self.lr_base = lr_base - self._rate = 0 - self.data_size = data_size - self.batch_size = batch_size - self.warmup_epoch = warmup_epoch - - - def step(self): - self._step += 1 - - rate = self.rate() - for p in self.optimizer.param_groups: - p['lr'] = rate - self._rate = rate - - self.optimizer.step() - - - def zero_grad(self): - self.optimizer.zero_grad() - - - def rate(self, step=None): - if step is None: - step = self._step - - if step <= int(self.data_size / self.batch_size * (self.warmup_epoch + 1) * 0.25): - r = self.lr_base * 1/(self.warmup_epoch + 1) - elif step <= int(self.data_size / self.batch_size * (self.warmup_epoch + 1) * 0.5): - r = self.lr_base * 2/(self.warmup_epoch + 1) - elif step <= int(self.data_size / self.batch_size * (self.warmup_epoch + 1) * 0.75): - r = self.lr_base * 3/(self.warmup_epoch + 1) - else: - r = self.lr_base - - return r - - -def get_optim(__C, model, data_size, lr_base=None): - if lr_base is None: - lr_base = __C.LR_BASE - - std_optim = getattr(Optim, __C.OPT) - params = filter(lambda p: p.requires_grad, model.parameters()) - eval_str = 'params, lr=0' - for key in __C.OPT_PARAMS: - eval_str += ' ,' + key + '=' + str(__C.OPT_PARAMS[key]) - - optim = WarmupOptimizer( - lr_base, - eval('std_optim' + '(' + eval_str + ')'), - data_size, - __C.BATCH_SIZE, - __C.WARMUP_EPOCH - ) - - return optim - - -def adjust_lr(optim, decay_r): - optim.lr_base *= decay_r diff --git a/spaces/CVPR/LIVE/atomic.cpp b/spaces/CVPR/LIVE/atomic.cpp deleted file mode 100644 index 9c642b9b84357a10f2155d28324517f36d00b0cb..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/atomic.cpp +++ /dev/null @@ -1,27 +0,0 @@ -//A hacky solution to get around the Ellipse include - -#ifdef WIN32 -#include -#include - -float win_atomic_add(float &target, float source) { - union { int i; float f; } old_val; - union { int i; float f; } new_val; - do { - old_val.f = target; - new_val.f = old_val.f + (float)source; - } while (InterlockedCompareExchange((LONG*)&target, (LONG)new_val.i, (LONG)old_val.i) != old_val.i); - return old_val.f; -} - -double win_atomic_add(double &target, double source) { - union { int64_t i; double f; } old_val; - union { int64_t i; double f; } new_val; - do { - old_val.f = target; - new_val.f = old_val.f + (double)source; - } while (InterlockedCompareExchange64((LONG64*)&target, (LONG64)new_val.i, (LONG64)old_val.i) != old_val.i); - return old_val.f; -} - -#endif \ No newline at end of file diff --git a/spaces/CVPR/LIVE/thrust/dependencies/cub/experimental/spmv_script.sh b/spaces/CVPR/LIVE/thrust/dependencies/cub/experimental/spmv_script.sh deleted file mode 100644 index f43204315a3d136e50c4fc8bee3d57622fa5c7be..0000000000000000000000000000000000000000 --- 
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-for i in 1 2 4 8 16 32 64 128 256 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304 8388608 16777216
-do
-    echo `date`, `$1 --dense=$i $2 $3 $4 $5 $6 $7`
-done
-
-echo
-echo
-
-for i in `ls /home/dumerrill/graphs/spmv/*.mtx`
-do
-    if [[ ( "`head -n 50 $i | grep complex`" = "" ) && ( "`head -n 50 $i | grep array`" = "" ) ]]
-    then
-        echo `date`, `$1 --mtx=$i $2 $3 $4 $5 $6 $7 2>/dev/null`
-    fi
-done
-
-echo
-echo
-
-for i in `ls /scratch/dumerrill/graphs/mtx/*.mtx`
-#for i in `ls /cygdrive/w/Dev/UFget/mtx/*.mtx`
-do
-    if [[ ( "`head -n 50 $i | grep complex`" = "" ) && ( "`head -n 50 $i | grep array`" = "" ) ]]
-    then
-        echo `date`, `$1 --mtx=$i $2 $3 $4 $5 $6 $7 2>/dev/null`
-    fi
-done
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/addressof.h b/spaces/CVPR/LIVE/thrust/thrust/addressof.h
deleted file mode 100644
index fa9e41c8efadf3458f3f2ed0b0ff8e281150bc9c..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/addressof.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2018 NVIDIA Corporation
-// Author: Bryce Adelstein Lelbach
-//
-// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt)
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-#if THRUST_CPP_DIALECT >= 2011
-#  include <memory>
-#endif
-
-namespace thrust
-{
-
-///////////////////////////////////////////////////////////////////////////////
-
-/*! Obtains the actual address of the object or function arg, even in presence of overloaded operator&.
- */
-template <typename T>
-__host__ __device__
-T* addressof(T& arg)
-{
-    // Cast through char to sidestep any user-provided operator&.
-    return reinterpret_cast<T*>(
-        &const_cast<char&>(reinterpret_cast<const volatile char&>(arg))
-    );
-}
-
-///////////////////////////////////////////////////////////////////////////////
-
-} // end namespace thrust
-
diff --git a/spaces/CVPR/Text2Human/Text2Human/data/pose_attr_dataset.py b/spaces/CVPR/Text2Human/Text2Human/data/pose_attr_dataset.py
deleted file mode 100644
index 7245846cb321db91c7935edbae83f7c451110725..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Text2Human/Text2Human/data/pose_attr_dataset.py
+++ /dev/null
@@ -1,109 +0,0 @@
-import os
-import os.path
-import random
-
-import numpy as np
-import torch
-import torch.utils.data as data
-from PIL import Image
-
-
-class DeepFashionAttrPoseDataset(data.Dataset):
-
-    def __init__(self,
-                 pose_dir,
-                 texture_ann_dir,
-                 shape_ann_path,
-                 downsample_factor=2,
-                 xflip=False):
-        self._densepose_path = pose_dir
-        self._image_fnames_target = []
-        self._image_fnames = []
-        self.upper_fused_attrs = []
-        self.lower_fused_attrs = []
-        self.outer_fused_attrs = []
-        self.shape_attrs = []
-
-        self.downsample_factor = downsample_factor
-        self.xflip = xflip
-
-        # load attributes
-        assert os.path.exists(f'{texture_ann_dir}/upper_fused.txt')
-        for idx, row in enumerate(
-                open(os.path.join(f'{texture_ann_dir}/upper_fused.txt'), 'r')):
-            annotations = row.split()
-            self._image_fnames_target.append(annotations[0])
-            self._image_fnames.append(f'{annotations[0].split(".")[0]}.png')
-            self.upper_fused_attrs.append(int(annotations[1]))
-
-        assert len(self._image_fnames_target) == len(self.upper_fused_attrs)
-
-        assert os.path.exists(f'{texture_ann_dir}/lower_fused.txt')
-        for idx, row in enumerate(
-                open(os.path.join(f'{texture_ann_dir}/lower_fused.txt'), 'r')):
-            annotations = row.split()
-            assert self._image_fnames_target[idx] == annotations[0]
-            self.lower_fused_attrs.append(int(annotations[1]))
-
-        assert
len(self._image_fnames_target) == len(self.lower_fused_attrs) - - assert os.path.exists(f'{texture_ann_dir}/outer_fused.txt') - for idx, row in enumerate( - open(os.path.join(f'{texture_ann_dir}/outer_fused.txt'), 'r')): - annotations = row.split() - assert self._image_fnames_target[idx] == annotations[0] - self.outer_fused_attrs.append(int(annotations[1])) - - assert len(self._image_fnames_target) == len(self.outer_fused_attrs) - - assert os.path.exists(shape_ann_path) - for idx, row in enumerate(open(os.path.join(shape_ann_path), 'r')): - annotations = row.split() - assert self._image_fnames_target[idx] == annotations[0] - self.shape_attrs.append([int(i) for i in annotations[1:]]) - - def _open_file(self, path_prefix, fname): - return open(os.path.join(path_prefix, fname), 'rb') - - def _load_densepose(self, raw_idx): - fname = self._image_fnames[raw_idx] - fname = f'{fname[:-4]}_densepose.png' - with self._open_file(self._densepose_path, fname) as f: - densepose = Image.open(f) - if self.downsample_factor != 1: - width, height = densepose.size - width = width // self.downsample_factor - height = height // self.downsample_factor - densepose = densepose.resize( - size=(width, height), resample=Image.NEAREST) - # channel-wise IUV order, [3, H, W] - densepose = np.array(densepose)[:, :, 2:].transpose(2, 0, 1) - return densepose.astype(np.float32) - - def __getitem__(self, index): - pose = self._load_densepose(index) - shape_attr = self.shape_attrs[index] - shape_attr = torch.LongTensor(shape_attr) - - if self.xflip and random.random() > 0.5: - pose = pose[:, :, ::-1].copy() - - upper_fused_attr = self.upper_fused_attrs[index] - lower_fused_attr = self.lower_fused_attrs[index] - outer_fused_attr = self.outer_fused_attrs[index] - - pose = pose / 12. - 1 - - return_dict = { - 'densepose': pose, - 'img_name': self._image_fnames_target[index], - 'shape_attr': shape_attr, - 'upper_fused_attr': upper_fused_attr, - 'lower_fused_attr': lower_fused_attr, - 'outer_fused_attr': outer_fused_attr, - } - - return return_dict - - def __len__(self): - return len(self._image_fnames) diff --git a/spaces/CVPR/WALT/mmdet/apis/test.py b/spaces/CVPR/WALT/mmdet/apis/test.py deleted file mode 100644 index 68b2347e4c2c12b23c7ebc0c0b066735d23cda1b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/apis/test.py +++ /dev/null @@ -1,189 +0,0 @@ -import os.path as osp -import pickle -import shutil -import tempfile -import time - -import mmcv -import torch -import torch.distributed as dist -from mmcv.image import tensor2imgs -from mmcv.runner import get_dist_info - -from mmdet.core import encode_mask_results - - -def single_gpu_test(model, - data_loader, - show=False, - out_dir=None, - show_score_thr=0.3): - model.eval() - results = [] - dataset = data_loader.dataset - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - - batch_size = len(result) - if show or out_dir: - if batch_size == 1 and isinstance(data['img'][0], torch.Tensor): - img_tensor = data['img'][0] - else: - img_tensor = data['img'][0].data[0] - img_metas = data['img_metas'][0].data[0] - imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) - assert len(imgs) == len(img_metas) - - for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): - h, w, _ = img_meta['img_shape'] - img_show = img[:h, :w, :] - - ori_h, ori_w = img_meta['ori_shape'][:-1] - img_show = mmcv.imresize(img_show, (ori_w, ori_h)) - - if out_dir: - 
out_file = osp.join(out_dir, img_meta['ori_filename']) - else: - out_file = None - model.module.show_result( - img_show, - result[i], - show=show, - out_file=out_file, - score_thr=show_score_thr) - - # encode mask results - if isinstance(result[0], tuple): - result = [(bbox_results, encode_mask_results(mask_results)) - for bbox_results, mask_results in result] - results.extend(result) - - for _ in range(batch_size): - prog_bar.update() - return results - - -def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): - """Test model with multiple gpus. - - This method tests model with multiple gpus and collects the results - under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' - it encodes results to gpu tensors and use gpu communication for results - collection. On cpu mode it saves the results on different gpus to 'tmpdir' - and collects them by the rank 0 worker. - - Args: - model (nn.Module): Model to be tested. - data_loader (nn.Dataloader): Pytorch data loader. - tmpdir (str): Path of directory to save the temporary results from - different gpus under cpu mode. - gpu_collect (bool): Option to use either gpu or cpu to collect results. - - Returns: - list: The prediction results. - """ - model.eval() - results = [] - dataset = data_loader.dataset - rank, world_size = get_dist_info() - if rank == 0: - prog_bar = mmcv.ProgressBar(len(dataset)) - time.sleep(2) # This line can prevent deadlock problem in some cases. - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - # encode mask results - if isinstance(result[0], tuple): - result = [(bbox_results, encode_mask_results(mask_results)) - for bbox_results, mask_results in result] - results.extend(result) - - if rank == 0: - batch_size = len(result) - for _ in range(batch_size * world_size): - prog_bar.update() - - # collect results from all ranks - if gpu_collect: - results = collect_results_gpu(results, len(dataset)) - else: - results = collect_results_cpu(results, len(dataset), tmpdir) - return results - - -def collect_results_cpu(result_part, size, tmpdir=None): - rank, world_size = get_dist_info() - # create a tmp dir if it is not specified - if tmpdir is None: - MAX_LEN = 512 - # 32 is whitespace - dir_tensor = torch.full((MAX_LEN, ), - 32, - dtype=torch.uint8, - device='cuda') - if rank == 0: - mmcv.mkdir_or_exist('.dist_test') - tmpdir = tempfile.mkdtemp(dir='.dist_test') - tmpdir = torch.tensor( - bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') - dir_tensor[:len(tmpdir)] = tmpdir - dist.broadcast(dir_tensor, 0) - tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() - else: - mmcv.mkdir_or_exist(tmpdir) - # dump the part result to the dir - mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) - dist.barrier() - # collect all parts - if rank != 0: - return None - else: - # load results of all parts from tmp dir - part_list = [] - for i in range(world_size): - part_file = osp.join(tmpdir, f'part_{i}.pkl') - part_list.append(mmcv.load(part_file)) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - # remove tmp dir - shutil.rmtree(tmpdir) - return ordered_results - - -def collect_results_gpu(result_part, size): - rank, world_size = get_dist_info() - # dump result part to tensor with pickle - part_tensor = torch.tensor( - bytearray(pickle.dumps(result_part)), 
dtype=torch.uint8, device='cuda')
-    # gather all result part tensor shape
-    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
-    shape_list = [shape_tensor.clone() for _ in range(world_size)]
-    dist.all_gather(shape_list, shape_tensor)
-    # padding result part tensor to max length
-    shape_max = torch.tensor(shape_list).max()
-    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
-    part_send[:shape_tensor[0]] = part_tensor
-    part_recv_list = [
-        part_tensor.new_zeros(shape_max) for _ in range(world_size)
-    ]
-    # gather all result part
-    dist.all_gather(part_recv_list, part_send)
-
-    if rank == 0:
-        part_list = []
-        for recv, shape in zip(part_recv_list, shape_list):
-            part_list.append(
-                pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
-        # sort the results
-        ordered_results = []
-        for res in zip(*part_list):
-            ordered_results.extend(list(res))
-        # the dataloader may pad some samples
-        ordered_results = ordered_results[:size]
-        return ordered_results
diff --git a/spaces/CVPR/ml-talking-face/docs/description.md b/spaces/CVPR/ml-talking-face/docs/description.md
deleted file mode 100644
index 5abfc29db7ff6ab2f2e2103794554dd675d5b18f..0000000000000000000000000000000000000000
--- a/spaces/CVPR/ml-talking-face/docs/description.md
+++ /dev/null
@@ -1,33 +0,0 @@
-This system generates a talking face video based on the input text.
-You can provide the input text in one of the four languages: Chinese (Mandarin), English, Japanese, and Korean.
-You may also select the target language, the language of the output speech.
-If the input text language and the target language are different, the input text will be translated to the target language using the Google Translate API.
-
-### Updates
-
-(2023.10.20.) It has been a year since the demonstration was suddenly shut down by MINDsLab (now MAUM.AI).
-And today, I'm happy to share that ⭐I have restored the demonstration⭐ on my own Lambda Labs instance!
-Over the past year, there have been numerous advancements in Gen AI, including multilingual TTS and talking face generation.
-This demo may look "old-fashioned" by now 😅... but I hope it helps other researchers taking a journey in the same field.
-
-⚠️By the way, I'm running an A10G instance from Lambda Labs at my own expense... I'm sorry, but I don't know when it will shut down again. 😵‍💫 I'll keep you posted on the status.
-
-[Buy Me A Coffee]
-
-
-(2022.06.17.) Thank you for visiting our demo!😊 This demo attracted a lot more attention than we anticipated. This, unfortunately, means that the computational burden is heavier than this demo was designed for. So, to maximize everyone's experience, we capped the length of the translated texts at:
-
-- 200 characters for English
-- 100 characters for Chinese, Japanese, and Korean.
-
-(2022.06.17.) We were originally planning to support any input text. However, when checking the logs recently, we found that there were a lot of inappropriate input texts. So, we decided to filter the inputs based on toxicity using [Perspective API @Google](https://developers.perspectiveapi.com/s/). Now, if you enter a possibly toxic text, the video generation will fail. We hope you understand.
-
-(2022.06.05.) Due to the latency from HuggingFace Spaces and video rendering, it takes 15 ~ 30 seconds to get a video result.
-
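(For the curious, the toxicity gate described in the 2022.06.17 update can be approximated in a few lines against the public Perspective API. This is a hedged sketch of the general pattern, not the demo's actual code; the helper name and the 0.7 threshold are illustrative assumptions:)

```python
import requests

def is_probably_toxic(text: str, api_key: str, threshold: float = 0.7) -> bool:
    # Ask the Perspective API to score the text for TOXICITY in [0, 1].
    resp = requests.post(
        "https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze",
        params={"key": api_key},
        json={
            "comment": {"text": text},
            "requestedAttributes": {"TOXICITY": {}},
        },
    )
    resp.raise_for_status()
    score = resp.json()["attributeScores"]["TOXICITY"]["summaryScore"]["value"]
    return score >= threshold
```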
-**Outdated updates**
-
-(2022.09.29.) ~~The core part of the demonstration had been running on MINDsLab's AWS instance, and I found that it can't connect to the instance now. I want to fix this issue, but I'm sorry to say that I left the company last week. I've contacted the company, but it takes some time to restore the session. If you're in a hurry, please send an e-mail directly to MINDsLab (hello@mindslab.ai).
-Whatever the reason, I'm sorry again. Hope you understand.~~
-
      \ No newline at end of file diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/README.md b/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/README.md deleted file mode 100644 index 6256d2b7f5a387988338d538df4e699eb17ba702..0000000000000000000000000000000000000000 --- a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# Segment Anything - -**[Meta AI Research, FAIR](https://ai.facebook.com/research/)** - -[Alexander Kirillov](https://alexander-kirillov.github.io/), [Eric Mintun](https://ericmintun.github.io/), [Nikhila Ravi](https://nikhilaravi.com/), [Hanzi Mao](https://hanzimao.me/), Chloe Rolland, Laura Gustafson, [Tete Xiao](https://tetexiao.com), [Spencer Whitehead](https://www.spencerwhitehead.com/), Alex Berg, Wan-Yen Lo, [Piotr Dollar](https://pdollar.github.io/), [Ross Girshick](https://www.rossgirshick.info/) - -[[`Paper`](https://ai.facebook.com/research/publications/segment-anything/)] [[`Project`](https://segment-anything.com/)] [[`Demo`](https://segment-anything.com/demo)] [[`Dataset`](https://segment-anything.com/dataset/index.html)] [[`Blog`](https://ai.facebook.com/blog/segment-anything-foundation-model-image-segmentation/)] - -![SAM design](assets/model_diagram.png?raw=true) - -The **Segment Anything Model (SAM)** produces high quality object masks from input prompts such as points or boxes, and it can be used to generate masks for all objects in an image. It has been trained on a [dataset](https://segment-anything.com/dataset/index.html) of 11 million images and 1.1 billion masks, and has strong zero-shot performance on a variety of segmentation tasks. - -
-
-## Installation
-
-The code requires `python>=3.8`, as well as `pytorch>=1.7` and `torchvision>=0.8`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. Installing both PyTorch and TorchVision with CUDA support is strongly recommended.
-
-Install Segment Anything:
-
-```
-pip install git+https://github.com/facebookresearch/segment-anything.git
-```
-
-or clone the repository locally and install with
-
-```
-git clone git@github.com:facebookresearch/segment-anything.git
-cd segment-anything; pip install -e .
-```
-
-The following optional dependencies are necessary for mask post-processing, saving masks in COCO format, the example notebooks, and exporting the model in ONNX format. `jupyter` is also required to run the example notebooks.
-
-```
-pip install opencv-python pycocotools matplotlib onnxruntime onnx
-```
-
-## Getting Started
-
-First download a [model checkpoint](#model-checkpoints). Then the model can be used in just a few lines to get masks from a given prompt:
-
-```
-from segment_anything import build_sam, SamPredictor
-predictor = SamPredictor(build_sam(checkpoint="<path/to/checkpoint>"))
-predictor.set_image(<your_image>)
-masks, _, _ = predictor.predict(<input_prompts>)
-```
-
-or generate masks for an entire image:
-
-```
-from segment_anything import build_sam, SamAutomaticMaskGenerator
-mask_generator = SamAutomaticMaskGenerator(build_sam(checkpoint="<path/to/checkpoint>"))
-masks = mask_generator.generate(<your_image>)
-```
-
-Additionally, masks can be generated for images from the command line:
-
-```
-python scripts/amg.py --checkpoint <path/to/checkpoint> --model-type <model_type> --input <image_or_folder> --output <path/to/output>
-```
-
-See the example notebooks on [using SAM with prompts](/notebooks/predictor_example.ipynb) and [automatically generating masks](/notebooks/automatic_mask_generator_example.ipynb) for more details.
-
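(To make the prompt-based path above concrete, here is a runnable end-to-end sketch; the checkpoint filename, image path, and click coordinates are illustrative assumptions, not part of the original README:)

```python
import cv2
import numpy as np
from segment_anything import sam_model_registry, SamPredictor

# Load a ViT-H SAM checkpoint from disk (filename is an assumption).
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
predictor = SamPredictor(sam)

# SAM expects an RGB uint8 image of shape (H, W, 3).
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
predictor.set_image(image)

# One foreground click (label 1) as the prompt; multimask_output=True
# returns three candidate masks with per-mask quality scores.
masks, scores, logits = predictor.predict(
    point_coords=np.array([[500, 375]]),
    point_labels=np.array([1]),
    multimask_output=True,
)
best_mask = masks[np.argmax(scores)]  # (H, W) boolean array
```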
-
-## ONNX Export
-
-SAM's lightweight mask decoder can be exported to ONNX format so that it can be run in any environment that supports ONNX runtime, such as in-browser as showcased in the [demo](https://segment-anything.com/demo). Export the model with
-
-```
-python scripts/export_onnx_model.py --checkpoint <path/to/checkpoint> --model-type <model_type> --output <path/to/output>
-```
-
-See the [example notebook](https://github.com/facebookresearch/segment-anything/blob/main/notebooks/onnx_model_example.ipynb) for details on how to combine image preprocessing via SAM's backbone with mask prediction using the ONNX model. It is recommended to use the latest stable version of PyTorch for ONNX export.
-
-## Model Checkpoints
-
-Three model versions of the model are available with different backbone sizes. These models can be instantiated by running
-
-```
-from segment_anything import sam_model_registry
-sam = sam_model_registry["<model_type>"](checkpoint="<path/to/checkpoint>")
-```
-
-Click the links below to download the checkpoint for the corresponding model name. The default model in bold can also be instantiated with `build_sam`, as in the examples in [Getting Started](#getting-started).
-
-* **`default` or `vit_h`: [ViT-H SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth)**
-* `vit_l`: [ViT-L SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth)
-* `vit_b`: [ViT-B SAM model.](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth)
-
-## License
-
-The model is licensed under the [Apache 2.0 license](LICENSE).
-
-## Contributing
-
-See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md).
-
-## Contributors
-
-The Segment Anything project was made possible with the help of many contributors (alphabetical):
-
-Aaron Adcock, Vaibhav Aggarwal, Morteza Behrooz, Cheng-Yang Fu, Ashley Gabriel, Ahuva Goldstand, Allen Goodman, Sumanth Gurram, Jiabo Hu, Somya Jain, Devansh Kukreja, Robert Kuo, Joshua Lane, Yanghao Li, Lilian Luong, Jitendra Malik, Mallika Malhotra, William Ngan, Omkar Parkhi, Nikhil Raina, Dirk Rowe, Neil Sejoor, Vanessa Stark, Bala Varadarajan, Bram Wasti, Zachary Winstrom
diff --git a/spaces/Chris4K/llms_compare/Jumanji-Welcome-To-The-Jungle-English-Dual-Audio-Eng-Hindi-1080p.md b/spaces/Chris4K/llms_compare/Jumanji-Welcome-To-The-Jungle-English-Dual-Audio-Eng-Hindi-1080p.md
deleted file mode 100644
index 8290f1b67c500c8b36e21784de3b3c2822643aaf..0000000000000000000000000000000000000000
--- a/spaces/Chris4K/llms_compare/Jumanji-Welcome-To-The-Jungle-English-Dual-Audio-Eng-Hindi-1080p.md
+++ /dev/null
@@ -1,80 +0,0 @@
-## Jumanji: Welcome to The Jungle (English) dual audio eng hindi 1080p
-
-**Jumanji: Welcome To The Jungle (English) Dual Audio Eng Hindi 1080p [https://www.google.com/url?q=https%3A%2F%2Furllie.com%2F2txP3J&sa=D&sntz=1&usg=AOvVaw3aRa6XhDCOE--8taplWh7E](https://www.google.com/url?q=https%3A%2F%2Furllie.com%2F2txP3J&sa=D&sntz=1&usg=AOvVaw3aRa6XhDCOE--8taplWh7E)**
-
-Here is a possible title and article for your keyword:
-
-# Jumanji: Welcome to the Jungle - A Fun Adventure Movie with Dual Audio in English and Hindi
-
-If you are looking for a fun and exciting movie to watch with your family or friends, you might want to check out Jumanji: Welcome to the Jungle. This is a 2017 adventure comedy film that is a sequel to the 1995 classic Jumanji.
It stars Dwayne Johnson, Jack Black, Kevin Hart, Karen Gillan, Nick Jonas, and Bobby Cannavale as a group of teenagers who get sucked into a video game version of Jumanji and have to survive its dangers as different avatars. - - - -One of the best features of this movie is that it has dual audio in English and Hindi. This means that you can enjoy the movie in your preferred language without missing any of the jokes or dialogues. You can also switch between the languages anytime you want with the help of subtitles. This way, you can experience the movie in a more immersive and engaging way. - - - -Jumanji: Welcome to the Jungle is a movie that has something for everyone. It has action, comedy, romance, fantasy, and drama. It also has amazing visuals and special effects that will make you feel like you are in the game world. The movie has received positive reviews from critics and audiences alike, and has grossed over $962 million worldwide. It is one of the highest-grossing films of 2017 and one of the most successful films in the Jumanji franchise. - - - -If you want to watch Jumanji: Welcome to the Jungle in dual audio with 1080p resolution, you can download it from various online sources. However, you should be careful about the quality and legality of the downloads. Some of them might be fake or contain viruses or malware that can harm your device. To avoid these risks, you should only download from trusted and verified sites that offer high-quality and safe downloads. - - - -One of such sites is **opensubtitles.com**, which is a popular and reliable platform for downloading subtitles and movies in different languages. You can find Jumanji: Welcome to the Jungle subtitles in English[^1^] on this site, as well as other languages like Spanish, French, German, etc. You can also find Jumanji: Welcome to the Jungle movies in dual audio in English and Hindi[^3^] on this site, as well as other formats like BluRay, DVD, etc. You can download these files easily and quickly with just a few clicks. - - - -Jumanji: Welcome to the Jungle is a movie that you don't want to miss. It is a fun and thrilling adventure that will keep you entertained from start to finish. With dual audio in English and Hindi, you can enjoy it even more in your preferred language. Download it today from opensubtitles.com and have a great time watching it! - -Here is a possible continuation of the article: - -But what is Jumanji: Welcome to the Jungle about? And how is it different from the original Jumanji? Well, let's find out. - - - -The movie starts with four high school students who are given detention for various reasons. They are Spencer, a nerdy gamer; Fridge, a football star; Bethany, a self-absorbed beauty; and Martha, a shy and smart girl. They are assigned to clean up an old storage room, where they find an old video game console with a cartridge of Jumanji. Curious, they decide to play the game and choose their avatars. However, they soon realize that they are not just playing the game, but are actually in the game. - - - -They are transported to a jungle setting, where they discover that they have become their avatars. Spencer is now Dr. Smolder Bravestone, a muscular and charismatic explorer; Fridge is now Franklin "Mouse" Finbar, a short and weak zoologist; Bethany is now Professor Sheldon "Shelly" Oberon, an overweight and middle-aged cartographer; and Martha is now Ruby Roundhouse, a sexy and skilled martial artist. 
They also learn that they have three lives each, and if they lose them all, they die for real.
-
-They meet Nigel, an NPC (non-player character) who gives them their mission: to return a magical jewel called the Jaguar's Eye to a giant statue and lift the curse that has fallen upon Jumanji. The jewel was stolen by Van Pelt, a corrupt explorer who has gained control over the animals of the jungle. Along the way, they encounter various obstacles and enemies, such as snakes, hippos, crocodiles, bikers, and Van Pelt's henchmen. They also meet Alex, another player who has been stuck in the game for 20 years as Jefferson "Seaplane" McDonough, a pilot and adventurer.
-
-As they progress through the game, they learn to work together and use their strengths and weaknesses to their advantage. They also learn more about themselves and each other, and develop friendships and romances. They realize that Jumanji is not just a game, but a test of their courage and character. Will they be able to complete the game and return to their normal lives? Or will they be trapped in Jumanji forever?
-
-To find out the answer, you have to watch Jumanji: Welcome to the Jungle in dual audio with 1080p resolution. It is a movie that will make you laugh, cry, cheer, and gasp. It is a movie that will make you feel like you are part of the adventure. It is a movie that you will love.
-
-dfd1c89656
-
diff --git a/spaces/CikeyQI/meme-api/docs/docker.md b/spaces/CikeyQI/meme-api/docs/docker.md
deleted file mode 100644
index 009ee8ca410293773e8e9acf97d0c0083c987585..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/meme-api/docs/docker.md
+++ /dev/null
@@ -1,56 +0,0 @@
-https://github.com/MeetWq/meme-generator
-
-## Docker Deployment
-
-### Run
-
-```shell
-docker run -d \
-  --name=meme-generator \
-  -p 2233:2233 \
-  --restart always \
-  meetwq/meme-generator:latest
-```
-
-Once it is running, it can be called via the api
-
-### Environment variables
-
-| Variable | Default | Description |
-| --- | --- | --- |
-| `MEME_DIRS` | `'["/data/memes"]'` | Additional meme paths |
-| `MEME_DISABLED_LIST` | `'[]'` | List of disabled memes |
-| `GIF_MAX_SIZE` | `10.0` | Size limit for generated gif files |
-| `GIF_MAX_FRAMES` | `100` | Frame limit for generated gif files |
-| `BAIDU_TRANS_APPID` | `''` | Baidu Translate appid |
-| `BAIDU_TRANS_APIKEY` | `''` | Baidu Translate apikey |
-| `LOG_LEVEL` | `'INFO'` | Log level |
-
-### Loading additional memes
-
-Additional meme paths can be specified via the `MEME_DIRS` environment variable; the default is `["/data/memes"]`
-
-You can map the docker path `/data` to a local path ``
-
-Then place the additional memes under `/memes`
-
-A complete run example:
-
-```shell
-docker run -d \
-  --name=meme-generator \
-  -p 2233:2233 \
-  --restart always \
-  -v :/data \
-  -e MEME_DIRS='["/data/memes"]' \
-  -e MEME_DISABLED_LIST='[]' \
-  -e GIF_MAX_SIZE=10.0 \
-  -e GIF_MAX_FRAMES=100 \
-  -e BAIDU_TRANS_APPID= \
-  -e BAIDU_TRANS_APIKEY= \
-  -e LOG_LEVEL='INFO' \
-  meetwq/meme-generator:main
-```
diff --git a/spaces/DHEIVER/ImageClassifierCataract/app.py b/spaces/DHEIVER/ImageClassifierCataract/app.py
deleted file mode 100644
index d2a3f6a40d0b17a4c08997b97e34e4da6e3aae1b..0000000000000000000000000000000000000000
--- a/spaces/DHEIVER/ImageClassifierCataract/app.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import tensorflow as tf
-from keras.utils import custom_object_scope
-import gradio as gr
-from PIL import Image
-import numpy as np
-import datetime
-
-# Defina a camada personalizada FixedDropout
-class FixedDropout(tf.keras.layers.Dropout):
-    def __init__(self, rate, **kwargs):
-        super().__init__(rate, **kwargs)
-        self._rate = rate
-
-    def call(self, inputs):
-        return tf.nn.dropout(inputs, self._rate)
-
-# Registre a camada personalizada FixedDropout com o TensorFlow
-with
custom_object_scope({'FixedDropout': FixedDropout}): - # Carregue o modelo - loaded_model = tf.keras.models.load_model('modelo_treinado.h5') - -# Crie uma lista de classes -class_names = ["Normal", "Cataract"] - -# Defina a função de classificação -def classify_image(inp): - # Redimensione a imagem para o formato esperado pelo modelo (192x256) - img = Image.fromarray(inp).resize((256, 192)) - - # Converta a imagem para um array numpy e normalize-a (escala de 0 a 1) - img = np.array(img) / 255.0 - - # Faça uma previsão usando o modelo treinado - prediction = loaded_model.predict(np.expand_dims(img, axis=0)).flatten() - - # Obtém a classe prevista - predicted_class = class_names[np.argmax(prediction)] - - # Obtém a data e hora atual - data_hora = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - - # Formate a saída em HTML - result_html = f""" -
-    <h2>Resultado da Classificação</h2>
-
-    <p>Categoria Predita: {predicted_class}</p>
-
-    <p>Data e Hora: {data_hora}</p>
      - """ - - return result_html - -# Crie uma interface Gradio com descrição formatada em HTML -iface = gr.Interface( - fn=classify_image, - inputs=gr.inputs.Image(shape=(192, 256)), - outputs=gr.outputs.HTML(), # Saída formatada com HTML - live=True, - title="Classificador de Catarata", - description=""" -
-    <p>Este é um classificador de imagens médicas para detectar catarata. Faça o upload de uma imagem e clique em 'Classificar' para obter a categoria predita.</p>
-
-    <p><b>Como usar:</b></p>
-
-    <ol>
-        <li>Clique no botão 'Escolher Arquivo' abaixo para fazer o upload de uma imagem.</li>
-        <li>Depois de fazer o upload, clique em 'Classificar' para iniciar a análise.</li>
-        <li>A categoria predita e a data/hora da classificação serão exibidas abaixo.</li>
-    </ol>
-
-    <p><b>Observação: Este modelo é destinado apenas para fins de demonstração e não deve substituir a avaliação de um médico.</b></p>
      - """, - allow_screenshot=False, - allow_flagging=False, - layout="vertical", - capture_session=True, - theme="default", -) - - -# Inicie a interface Gradio -iface.launch() diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/base.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/base.py deleted file mode 100644 index 4a2ae002ef07b32912b98677c3fdc47bb1a97015..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/base.py +++ /dev/null @@ -1,407 +0,0 @@ -"""Contains all of the components that can be used with Gradio Interface / Blocks. -Along with the docs for each component, you can find the names of example demos that use -each component. These demos are located in the `demo` directory.""" - -from __future__ import annotations - -import hashlib -import os -import secrets -import shutil -import tempfile -import urllib.request -from enum import Enum -from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable - -import aiofiles -import numpy as np -import requests -from fastapi import UploadFile -from gradio_client import utils as client_utils -from gradio_client.documentation import set_documentation_group -from gradio_client.serializing import ( - Serializable, -) -from PIL import Image as _Image # using _ to minimize namespace pollution - -from gradio import processing_utils, utils -from gradio.blocks import Block, BlockContext -from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import ( - EventListener, -) -from gradio.layouts import Column, Form, Row - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: list[str] - data: list[list[str | int | bool]] - - -set_documentation_group("component") -_Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843 - - -class _Keywords(Enum): - NO_VALUE = "NO_VALUE" # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()` - FINISHED_ITERATING = "FINISHED_ITERATING" # Used to skip processing of a component's value (needed for generators + state) - - -class Component(Block, Serializable): - """ - A base class for defining the methods that all gradio components should have. - """ - - def __init__(self, *args, **kwargs): - Block.__init__(self, *args, **kwargs) - EventListener.__init__(self) - - def __str__(self): - return self.__repr__() - - def __repr__(self): - return f"{self.get_block_name()}" - - def get_config(self): - """ - :return: a dictionary with context variables for the javascript file associated with the context - """ - return { - "name": self.get_block_name(), - **super().get_config(), - } - - def preprocess(self, x: Any) -> Any: - """ - Any preprocessing needed to be performed on function input. - """ - return x - - def postprocess(self, y): - """ - Any postprocessing needed to be performed on function output. - """ - return y - - def style(self, *args, **kwargs): - """ - This method is deprecated. Please set these arguments in the Components constructor instead. - """ - warn_style_method_deprecation() - put_deprecated_params_in_box = False - if "rounded" in kwargs: - warn_deprecation( - "'rounded' styling is no longer supported. To round adjacent components together, place them in a Column(variant='box')." 
- ) - if isinstance(kwargs["rounded"], (list, tuple)): - put_deprecated_params_in_box = True - kwargs.pop("rounded") - if "margin" in kwargs: - warn_deprecation( - "'margin' styling is no longer supported. To place adjacent components together without margin, place them in a Column(variant='box')." - ) - if isinstance(kwargs["margin"], (list, tuple)): - put_deprecated_params_in_box = True - kwargs.pop("margin") - if "border" in kwargs: - warn_deprecation( - "'border' styling is no longer supported. To place adjacent components in a shared border, place them in a Column(variant='box')." - ) - kwargs.pop("border") - for key in kwargs: - warn_deprecation(f"Unknown style parameter: {key}") - if ( - put_deprecated_params_in_box - and isinstance(self.parent, (Row, Column)) - and self.parent.variant == "default" - ): - self.parent.variant = "compact" - return self - - -class IOComponent(Component): - """ - A base class for defining methods that all input/output components should have. - """ - - def __init__( - self, - *, - value: Any = None, - label: str | None = None, - info: str | None = None, - show_label: bool | None = None, - container: bool = True, - scale: int | None = None, - min_width: int | None = None, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - load_fn: Callable | None = None, - every: float | None = None, - **kwargs, - ): - self.temp_files: set[str] = set() - self.DEFAULT_TEMP_DIR = os.environ.get("GRADIO_TEMP_DIR") or str( - Path(tempfile.gettempdir()) / "gradio" - ) - - Component.__init__( - self, elem_id=elem_id, elem_classes=elem_classes, visible=visible, **kwargs - ) - - self.label = label - self.info = info - if not container: - if show_label: - warn_deprecation("show_label has no effect when container is False.") - show_label = False - if show_label is None: - show_label = True - self.show_label = show_label - self.container = container - if scale is not None and scale != round(scale): - warn_deprecation( - f"'scale' value should be an integer. Using {scale} will cause issues." 
- ) - self.scale = scale - self.min_width = min_width - self.interactive = interactive - - # load_event is set in the Blocks.attach_load_events method - self.load_event: None | dict[str, Any] = None - self.load_event_to_attach = None - load_fn, initial_value = self.get_load_fn_and_initial_value(value) - self.value = ( - initial_value - if self._skip_init_processing - else self.postprocess(initial_value) - ) - if callable(load_fn): - self.attach_load_event(load_fn, every) - - @staticmethod - def hash_file(file_path: str | Path, chunk_num_blocks: int = 128) -> str: - sha1 = hashlib.sha1() - with open(file_path, "rb") as f: - for chunk in iter(lambda: f.read(chunk_num_blocks * sha1.block_size), b""): - sha1.update(chunk) - return sha1.hexdigest() - - @staticmethod - def hash_url(url: str, chunk_num_blocks: int = 128) -> str: - sha1 = hashlib.sha1() - remote = urllib.request.urlopen(url) - max_file_size = 100 * 1024 * 1024 # 100MB - total_read = 0 - while True: - data = remote.read(chunk_num_blocks * sha1.block_size) - total_read += chunk_num_blocks * sha1.block_size - if not data or total_read > max_file_size: - break - sha1.update(data) - return sha1.hexdigest() - - @staticmethod - def hash_bytes(bytes: bytes): - sha1 = hashlib.sha1() - sha1.update(bytes) - return sha1.hexdigest() - - @staticmethod - def hash_base64(base64_encoding: str, chunk_num_blocks: int = 128) -> str: - sha1 = hashlib.sha1() - for i in range(0, len(base64_encoding), chunk_num_blocks * sha1.block_size): - data = base64_encoding[i : i + chunk_num_blocks * sha1.block_size] - sha1.update(data.encode("utf-8")) - return sha1.hexdigest() - - def make_temp_copy_if_needed(self, file_path: str | Path) -> str: - """Returns a temporary file path for a copy of the given file path if it does - not already exist. Otherwise returns the path to the existing temp file.""" - temp_dir = self.hash_file(file_path) - temp_dir = Path(self.DEFAULT_TEMP_DIR) / temp_dir - temp_dir.mkdir(exist_ok=True, parents=True) - - name = client_utils.strip_invalid_filename_characters(Path(file_path).name) - full_temp_file_path = str(utils.abspath(temp_dir / name)) - - if not Path(full_temp_file_path).exists(): - shutil.copy2(file_path, full_temp_file_path) - - self.temp_files.add(full_temp_file_path) - return full_temp_file_path - - async def save_uploaded_file(self, file: UploadFile, upload_dir: str) -> str: - temp_dir = secrets.token_hex( - 20 - ) # Since the full file is being uploaded anyways, there is no benefit to hashing the file. - temp_dir = Path(upload_dir) / temp_dir - temp_dir.mkdir(exist_ok=True, parents=True) - - if file.filename: - file_name = Path(file.filename).name - name = client_utils.strip_invalid_filename_characters(file_name) - else: - name = f"tmp{secrets.token_hex(5)}" - - full_temp_file_path = str(utils.abspath(temp_dir / name)) - - async with aiofiles.open(full_temp_file_path, "wb") as output_file: - while True: - content = await file.read(100 * 1024 * 1024) - if not content: - break - await output_file.write(content) - - return full_temp_file_path - - def download_temp_copy_if_needed(self, url: str) -> str: - """Downloads a file and makes a temporary file path for a copy if does not already - exist. 
Otherwise returns the path to the existing temp file.""" - temp_dir = self.hash_url(url) - temp_dir = Path(self.DEFAULT_TEMP_DIR) / temp_dir - temp_dir.mkdir(exist_ok=True, parents=True) - - name = client_utils.strip_invalid_filename_characters(Path(url).name) - full_temp_file_path = str(utils.abspath(temp_dir / name)) - - if not Path(full_temp_file_path).exists(): - with requests.get(url, stream=True) as r, open( - full_temp_file_path, "wb" - ) as f: - shutil.copyfileobj(r.raw, f) - - self.temp_files.add(full_temp_file_path) - return full_temp_file_path - - def base64_to_temp_file_if_needed( - self, base64_encoding: str, file_name: str | None = None - ) -> str: - """Converts a base64 encoding to a file and returns the path to the file if - the file doesn't already exist. Otherwise returns the path to the existing file. - """ - temp_dir = self.hash_base64(base64_encoding) - temp_dir = Path(self.DEFAULT_TEMP_DIR) / temp_dir - temp_dir.mkdir(exist_ok=True, parents=True) - - guess_extension = client_utils.get_extension(base64_encoding) - if file_name: - file_name = client_utils.strip_invalid_filename_characters(file_name) - elif guess_extension: - file_name = f"file.{guess_extension}" - else: - file_name = "file" - - full_temp_file_path = str(utils.abspath(temp_dir / file_name)) # type: ignore - - if not Path(full_temp_file_path).exists(): - data, _ = client_utils.decode_base64_to_binary(base64_encoding) - with open(full_temp_file_path, "wb") as fb: - fb.write(data) - - self.temp_files.add(full_temp_file_path) - return full_temp_file_path - - def pil_to_temp_file(self, img: _Image.Image, dir: str, format="png") -> str: - bytes_data = processing_utils.encode_pil_to_bytes(img, format) - temp_dir = Path(dir) / self.hash_bytes(bytes_data) - temp_dir.mkdir(exist_ok=True, parents=True) - filename = str(temp_dir / f"image.{format}") - img.save(filename, pnginfo=processing_utils.get_pil_metadata(img)) - return filename - - def img_array_to_temp_file(self, arr: np.ndarray, dir: str) -> str: - pil_image = _Image.fromarray( - processing_utils._convert(arr, np.uint8, force_copy=False) - ) - return self.pil_to_temp_file(pil_image, dir, format="png") - - def audio_to_temp_file( - self, data: np.ndarray, sample_rate: int, dir: str, format: str - ): - temp_dir = Path(dir) / self.hash_bytes(data.tobytes()) - temp_dir.mkdir(exist_ok=True, parents=True) - filename = str(temp_dir / f"audio.{format}") - processing_utils.audio_to_file(sample_rate, data, filename, format=format) - return filename - - def file_bytes_to_file(self, data: bytes, dir: str, file_name: str): - path = Path(dir) / self.hash_bytes(data) - path.mkdir(exist_ok=True, parents=True) - path = path / Path(file_name).name - path.write_bytes(data) - return path - - def get_config(self): - config = { - "label": self.label, - "show_label": self.show_label, - "container": self.container, - "scale": self.scale, - "min_width": self.min_width, - "interactive": self.interactive, - **super().get_config(), - } - if self.info: - config["info"] = self.info - return config - - @staticmethod - def get_load_fn_and_initial_value(value): - if callable(value): - initial_value = value() - load_fn = value - else: - initial_value = value - load_fn = None - return load_fn, initial_value - - def attach_load_event(self, callable: Callable, every: float | None): - """Add a load event that runs `callable`, optionally every `every` seconds.""" - self.load_event_to_attach = (callable, every) - - def as_example(self, input_data): - """Return the input data in a way that can be 
displayed by the examples dataset component in the front-end.""" - return input_data - - -class FormComponent: - def get_expected_parent(self) -> type[Form]: - if getattr(self, "container", None) is False: - return None - return Form - - -def component(cls_name: str) -> Component: - obj = utils.component_or_layout_class(cls_name)() - if isinstance(obj, BlockContext): - raise ValueError(f"Invalid component: {obj.__class__}") - return obj - - -def get_component_instance(comp: str | dict | Component, render=True) -> Component: - if isinstance(comp, str): - component_obj = component(comp) - if not (render): - component_obj.unrender() - return component_obj - elif isinstance(comp, dict): - name = comp.pop("name") - component_cls = utils.component_or_layout_class(name) - component_obj = component_cls(**comp) - if isinstance(component_obj, BlockContext): - raise ValueError(f"Invalid component: {name}") - if not (render): - component_obj.unrender() - return component_obj - elif isinstance(comp, Component): - return comp - else: - raise ValueError( - f"Component must provided as a `str` or `dict` or `Component` but is {comp}" - ) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py deleted file mode 100644 index ae133f447e4edc0c6414062d357d18f1305261dd..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py +++ /dev/null @@ -1,329 +0,0 @@ -from typing import Optional - -from requests import HTTPError, Response - -from ._fixes import JSONDecodeError - - -class HfHubHTTPError(HTTPError): - """ - HTTPError to inherit from for any custom HTTP Error raised in HF Hub. - - Any HTTPError is converted at least into a `HfHubHTTPError`. If some information is - sent back by the server, it will be added to the error message. - - Added details: - - Request id from "X-Request-Id" header if exists. - - Server error message from the header "X-Error-Message". - - Server error message if we can found one in the response body. - - Example: - ```py - import requests - from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError - - response = get_session().post(...) - try: - hf_raise_for_status(response) - except HfHubHTTPError as e: - print(str(e)) # formatted message - e.request_id, e.server_message # details returned by server - - # Complete the error message with additional information once it's raised - e.append_to_message("\n`create_commit` expects the repository to exist.") - raise - ``` - """ - - request_id: Optional[str] = None - server_message: Optional[str] = None - - def __init__(self, message: str, response: Optional[Response] = None): - # Parse server information if any. 
- if response is not None: - self.request_id = response.headers.get("X-Request-Id") - try: - server_data = response.json() - except JSONDecodeError: - server_data = {} - - # Retrieve server error message from multiple sources - server_message_from_headers = response.headers.get("X-Error-Message") - server_message_from_body = server_data.get("error") - server_multiple_messages_from_body = "\n".join( - error["message"] for error in server_data.get("errors", []) if "message" in error - ) - - # Concatenate error messages - _server_message = "" - if server_message_from_headers is not None: # from headers - _server_message += server_message_from_headers + "\n" - if server_message_from_body is not None: # from body "error" - if isinstance(server_message_from_body, list): - server_message_from_body = "\n".join(server_message_from_body) - if server_message_from_body not in _server_message: - _server_message += server_message_from_body + "\n" - if server_multiple_messages_from_body is not None: # from body "errors" - if server_multiple_messages_from_body not in _server_message: - _server_message += server_multiple_messages_from_body + "\n" - _server_message = _server_message.strip() - - # Set message to `HfHubHTTPError` (if any) - if _server_message != "": - self.server_message = _server_message - - super().__init__( - _format_error_message( - message, - request_id=self.request_id, - server_message=self.server_message, - ), - response=response, - ) - - def append_to_message(self, additional_message: str) -> None: - """Append additional information to the `HfHubHTTPError` initial message.""" - self.args = (self.args[0] + additional_message,) + self.args[1:] - - -class RepositoryNotFoundError(HfHubHTTPError): - """ - Raised when trying to access a hf.co URL with an invalid repository name, or - with a private repo name the user does not have access to. - - Example: - - ```py - >>> from huggingface_hub import model_info - >>> model_info("") - (...) - huggingface_hub.utils._errors.RepositoryNotFoundError: 401 Client Error. (Request ID: PvMw_VjBMjVdMz53WKIzP) - - Repository Not Found for url: https://huggingface.co/api/models/%3Cnon_existent_repository%3E. - Please make sure you specified the correct `repo_id` and `repo_type`. - If the repo is private, make sure you are authenticated. - Invalid username or password. - ``` - """ - - -class GatedRepoError(RepositoryNotFoundError): - """ - Raised when trying to access a gated repository for which the user is not on the - authorized list. - - Note: derives from `RepositoryNotFoundError` to ensure backward compatibility. - - Example: - - ```py - >>> from huggingface_hub import model_info - >>> model_info("") - (...) - huggingface_hub.utils._errors.GatedRepoError: 403 Client Error. (Request ID: ViT1Bf7O_026LGSQuVqfa) - - Cannot access gated repo for url https://huggingface.co/api/models/ardent-figment/gated-model. - Access to model ardent-figment/gated-model is restricted and you are not in the authorized list. - Visit https://huggingface.co/ardent-figment/gated-model to ask for access. - ``` - """ - - -class RevisionNotFoundError(HfHubHTTPError): - """ - Raised when trying to access a hf.co URL with a valid repository but an invalid - revision. - - Example: - - ```py - >>> from huggingface_hub import hf_hub_download - >>> hf_hub_download('bert-base-cased', 'config.json', revision='') - (...) - huggingface_hub.utils._errors.RevisionNotFoundError: 404 Client Error. 
(Request ID: Mwhe_c3Kt650GcdKEFomX) - - Revision Not Found for url: https://huggingface.co/bert-base-cased/resolve/%3Cnon-existent-revision%3E/config.json. - ``` - """ - - -class EntryNotFoundError(HfHubHTTPError): - """ - Raised when trying to access a hf.co URL with a valid repository and revision - but an invalid filename. - - Example: - - ```py - >>> from huggingface_hub import hf_hub_download - >>> hf_hub_download('bert-base-cased', '') - (...) - huggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. (Request ID: 53pNl6M0MxsnG5Sw8JA6x) - - Entry Not Found for url: https://huggingface.co/bert-base-cased/resolve/main/%3Cnon-existent-file%3E. - ``` - """ - - -class LocalEntryNotFoundError(EntryNotFoundError, FileNotFoundError, ValueError): - """ - Raised when trying to access a file that is not on the disk when network is - disabled or unavailable (connection issue). The entry may exist on the Hub. - - Note: `ValueError` type is to ensure backward compatibility. - Note: `LocalEntryNotFoundError` derives from `HTTPError` because of `EntryNotFoundError` - even when it is not a network issue. - - Example: - - ```py - >>> from huggingface_hub import hf_hub_download - >>> hf_hub_download('bert-base-cased', '', local_files_only=True) - (...) - huggingface_hub.utils._errors.LocalEntryNotFoundError: Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable hf.co look-ups and downloads online, set 'local_files_only' to False. - ``` - """ - - def __init__(self, message: str): - super().__init__(message, response=None) - - -class BadRequestError(HfHubHTTPError, ValueError): - """ - Raised by `hf_raise_for_status` when the server returns a HTTP 400 error. - - Example: - - ```py - >>> resp = requests.post("hf.co/api/check", ...) - >>> hf_raise_for_status(resp, endpoint_name="check") - huggingface_hub.utils._errors.BadRequestError: Bad request for check endpoint: {details} (Request ID: XXX) - ``` - """ - - -def hf_raise_for_status(response: Response, endpoint_name: Optional[str] = None) -> None: - """ - Internal version of `response.raise_for_status()` that will refine a - potential HTTPError. Raised exception will be an instance of `HfHubHTTPError`. - - This helper is meant to be the unique method to raise_for_status when making a call - to the Hugging Face Hub. - - Example: - ```py - import requests - from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError - - response = get_session().post(...) - try: - hf_raise_for_status(response) - except HfHubHTTPError as e: - print(str(e)) # formatted message - e.request_id, e.server_message # details returned by server - - # Complete the error message with additional information once it's raised - e.append_to_message("\n`create_commit` expects the repository to exist.") - raise - ``` - - Args: - response (`Response`): - Response from the server. - endpoint_name (`str`, *optional*): - Name of the endpoint that has been called. If provided, the error message - will be more complete. - - - - Raises when the request has failed: - - - [`~utils.RepositoryNotFoundError`] - If the repository to download from cannot be found. This may be because it - doesn't exist, because `repo_type` is not set correctly, or because the repo - is `private` and you do not have access. - - [`~utils.GatedRepoError`] - If the repository exists but is gated and the user is not on the authorized - list. - - [`~utils.RevisionNotFoundError`] - If the repository exists but the revision couldn't be find. 
- - [`~utils.EntryNotFoundError`] - If the repository exists but the entry (e.g. the requested file) couldn't be - find. - - [`~utils.BadRequestError`] - If request failed with a HTTP 400 BadRequest error. - - [`~utils.HfHubHTTPError`] - If request failed for a reason not listed above. - - - """ - try: - response.raise_for_status() - except HTTPError as e: - error_code = response.headers.get("X-Error-Code") - - if error_code == "RevisionNotFound": - message = f"{response.status_code} Client Error." + "\n\n" + f"Revision Not Found for url: {response.url}." - raise RevisionNotFoundError(message, response) from e - - elif error_code == "EntryNotFound": - message = f"{response.status_code} Client Error." + "\n\n" + f"Entry Not Found for url: {response.url}." - raise EntryNotFoundError(message, response) from e - - elif error_code == "GatedRepo": - message = ( - f"{response.status_code} Client Error." + "\n\n" + f"Cannot access gated repo for url {response.url}." - ) - raise GatedRepoError(message, response) from e - - elif error_code == "RepoNotFound" or response.status_code == 401: - # 401 is misleading as it is returned for: - # - private and gated repos if user is not authenticated - # - missing repos - # => for now, we process them as `RepoNotFound` anyway. - # See https://gist.github.com/Wauplin/46c27ad266b15998ce56a6603796f0b9 - message = ( - f"{response.status_code} Client Error." - + "\n\n" - + f"Repository Not Found for url: {response.url}." - + "\nPlease make sure you specified the correct `repo_id` and" - " `repo_type`.\nIf you are trying to access a private or gated repo," - " make sure you are authenticated." - ) - raise RepositoryNotFoundError(message, response) from e - - elif response.status_code == 400: - message = ( - f"\n\nBad request for {endpoint_name} endpoint:" if endpoint_name is not None else "\n\nBad request:" - ) - raise BadRequestError(message, response=response) from e - - # Convert `HTTPError` into a `HfHubHTTPError` to display request information - # as well (request id and/or server error message) - raise HfHubHTTPError(str(e), response=response) from e - - -def _format_error_message(message: str, request_id: Optional[str], server_message: Optional[str]) -> str: - """ - Format the `HfHubHTTPError` error message based on initial message and information - returned by the server. - - Used when initializing `HfHubHTTPError`. 
- """ - # Add message from response body - if server_message is not None and len(server_message) > 0 and server_message.lower() not in message.lower(): - if "\n\n" in message: - message += "\n" + server_message - else: - message += "\n\n" + server_message - - # Add Request ID - if request_id is not None and str(request_id).lower() not in message.lower(): - request_id_message = f" (Request ID: {request_id})" - if "\n" in message: - newline_index = message.index("\n") - message = message[:newline_index] + request_id_message + message[newline_index:] - else: - message += request_id_message - - return message diff --git a/spaces/Dao3/openai-translator/README.md b/spaces/Dao3/openai-translator/README.md deleted file mode 100644 index 48b0fffd7ec72e0248a1f2a44d3a152c5695dfa5..0000000000000000000000000000000000000000 --- a/spaces/Dao3/openai-translator/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Openai Translator -emoji: 💻 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: openrail -duplicated_from: nyanko7/openai-translator ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DeepLabCut/MegaDetector_DeepLabCut/README.md b/spaces/DeepLabCut/MegaDetector_DeepLabCut/README.md deleted file mode 100644 index 02f01f1fa3ae6da1adec1169799d0ca9ce7a95a6..0000000000000000000000000000000000000000 --- a/spaces/DeepLabCut/MegaDetector_DeepLabCut/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MegaDetector + DeepLabCut -emoji: 🦣📊💜 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DiffusionArtco/scifi-art-creator/README.md b/spaces/DiffusionArtco/scifi-art-creator/README.md deleted file mode 100644 index f489d55e38d86e4cd0eb889617a379840e7c75bf..0000000000000000000000000000000000000000 --- a/spaces/DiffusionArtco/scifi-art-creator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ImagineAI Imagine Generator -emoji: 💩 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -duplicated_from: DiffusionArtco/AnimeTop50 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/network.py b/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/network.py deleted file mode 100644 index ff0c169eabdc579041dac0650fbc6da956646594..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/network.py +++ /dev/null @@ -1,781 +0,0 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -"""Helper for managing networks.""" - -import types -import inspect -import re -import uuid -import sys -import copy -import numpy as np -import tensorflow as tf - -from collections import OrderedDict -from typing import Any, List, Tuple, Union, Callable - -from . import tfutil -from .. import util - -from .tfutil import TfExpression, TfExpressionEx - -# pylint: disable=protected-access -# pylint: disable=attribute-defined-outside-init -# pylint: disable=too-many-public-methods - -_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import. -_import_module_src = dict() # Source code for temporary modules created during pickle import. - - -def import_handler(handler_func): - """Function decorator for declaring custom import handlers.""" - _import_handlers.append(handler_func) - return handler_func - - -class Network: - """Generic network abstraction. - - Acts as a convenience wrapper for a parameterized network construction - function, providing several utility methods and convenient access to - the inputs/outputs/weights. - - Network objects can be safely pickled and unpickled for long-term - archival purposes. The pickling works reliably as long as the underlying - network construction function is defined in a standalone Python module - that has no side effects or application-specific imports. - - Args: - name: Network name. Used to select TensorFlow name and variable scopes. Defaults to build func name if None. - func_name: Fully qualified name of the underlying network construction function, or a top-level function object. - static_kwargs: Keyword arguments to be passed in to the network construction function. - """ - - def __init__(self, name: str = None, func_name: Any = None, **static_kwargs): - # Locate the user-specified build function. - assert isinstance(func_name, str) or util.is_top_level_function(func_name) - if util.is_top_level_function(func_name): - func_name = util.get_top_level_function_name(func_name) - module, func_name = util.get_module_from_obj_name(func_name) - func = util.get_obj_from_module(module, func_name) - - # Dig up source code for the module containing the build function. - module_src = _import_module_src.get(module, None) - if module_src is None: - module_src = inspect.getsource(module) - - # Initialize fields. - self._init_fields(name=(name or func_name), static_kwargs=static_kwargs, build_func=func, build_func_name=func_name, build_module_src=module_src) - - def _init_fields(self, name: str, static_kwargs: dict, build_func: Callable, build_func_name: str, build_module_src: str) -> None: - tfutil.assert_tf_initialized() - assert isinstance(name, str) - assert len(name) >= 1 - assert re.fullmatch(r"[A-Za-z0-9_.\\-]*", name) - assert isinstance(static_kwargs, dict) - assert util.is_pickleable(static_kwargs) - assert callable(build_func) - assert isinstance(build_func_name, str) - assert isinstance(build_module_src, str) - - # Choose TensorFlow name scope. - with tf.name_scope(None): - scope = tf.get_default_graph().unique_name(name, mark_as_used=True) - - # Query current TensorFlow device. - with tfutil.absolute_name_scope(scope), tf.control_dependencies(None): - device = tf.no_op(name="_QueryDevice").device - - # Immutable state. - self._name = name - self._scope = scope - self._device = device - self._static_kwargs = util.EasyDict(copy.deepcopy(static_kwargs)) - self._build_func = build_func - self._build_func_name = build_func_name - self._build_module_src = build_module_src - - # State before _init_graph(). 
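# (Note: the fields below implement deferred construction -- pending variable
# values are staged in _var_inits and the template graph is only materialized
# by _init_graph() on first access to the templates or variables.)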
- self._var_inits = dict() # var_name => initial_value, set to None by _init_graph() - self._all_inits_known = False # Do we know for sure that _var_inits covers all the variables? - self._components = None # subnet_name => Network, None if the components are not known yet - - # Initialized by _init_graph(). - self._input_templates = None - self._output_templates = None - self._own_vars = None - - # Cached values initialized by the respective methods. - self._input_shapes = None - self._output_shapes = None - self._input_names = None - self._output_names = None - self._vars = None - self._trainables = None - self._var_global_to_local = None - self._run_cache = dict() - - def _init_graph(self) -> None: - assert self._var_inits is not None - assert self._input_templates is None - assert self._output_templates is None - assert self._own_vars is None - - # Initialize components. - if self._components is None: - self._components = util.EasyDict() - - # Choose build func kwargs. - build_kwargs = dict(self.static_kwargs) - build_kwargs["is_template_graph"] = True - build_kwargs["components"] = self._components - - # Override scope and device, and ignore surrounding control dependencies. - with tfutil.absolute_variable_scope(self.scope, reuse=False), tfutil.absolute_name_scope(self.scope), tf.device(self.device), tf.control_dependencies(None): - assert tf.get_variable_scope().name == self.scope - assert tf.get_default_graph().get_name_scope() == self.scope - - # Create input templates. - self._input_templates = [] - for param in inspect.signature(self._build_func).parameters.values(): - if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty: - self._input_templates.append(tf.placeholder(tf.float32, name=param.name)) - - # Call build func. - out_expr = self._build_func(*self._input_templates, **build_kwargs) - - # Collect output templates and variables. - assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple) - self._output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr) - self._own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/")) - - # Check for errors. - if len(self._input_templates) == 0: - raise ValueError("Network build func did not list any inputs.") - if len(self._output_templates) == 0: - raise ValueError("Network build func did not return any outputs.") - if any(not tfutil.is_tf_expression(t) for t in self._output_templates): - raise ValueError("Network outputs must be TensorFlow expressions.") - if any(t.shape.ndims is None for t in self._input_templates): - raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.") - if any(t.shape.ndims is None for t in self._output_templates): - raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.") - if any(not isinstance(comp, Network) for comp in self._components.values()): - raise ValueError("Components of a Network must be Networks themselves.") - if len(self._components) != len(set(comp.name for comp in self._components.values())): - raise ValueError("Components of a Network must have unique names.") - - # Initialize variables.
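# (Two-phase init: values staged in _var_inits are written via tfutil.set_vars(),
# every other variable falls back to its default TensorFlow initializer, and
# _var_inits is then cleared to mark the graph as fully live.)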
- if len(self._var_inits): - tfutil.set_vars({self._get_vars()[name]: value for name, value in self._var_inits.items() if name in self._get_vars()}) - remaining_inits = [var.initializer for name, var in self._own_vars.items() if name not in self._var_inits] - if self._all_inits_known: - assert len(remaining_inits) == 0 - else: - tfutil.run(remaining_inits) - self._var_inits = None - - @property - def name(self): - """User-specified name string.""" - return self._name - - @property - def scope(self): - """Unique TensorFlow scope containing template graph and variables, derived from the user-specified name.""" - return self._scope - - @property - def device(self): - """Name of the TensorFlow device that the weights of this network reside on. Determined by the current device at construction time.""" - return self._device - - @property - def static_kwargs(self): - """EasyDict of arguments passed to the user-supplied build func.""" - return copy.deepcopy(self._static_kwargs) - - @property - def components(self): - """EasyDict of sub-networks created by the build func.""" - return copy.copy(self._get_components()) - - def _get_components(self): - if self._components is None: - self._init_graph() - assert self._components is not None - return self._components - - @property - def input_shapes(self): - """List of input tensor shapes, including minibatch dimension.""" - if self._input_shapes is None: - self._input_shapes = [t.shape.as_list() for t in self.input_templates] - return copy.deepcopy(self._input_shapes) - - @property - def output_shapes(self): - """List of output tensor shapes, including minibatch dimension.""" - if self._output_shapes is None: - self._output_shapes = [t.shape.as_list() for t in self.output_templates] - return copy.deepcopy(self._output_shapes) - - @property - def input_shape(self): - """Short-hand for input_shapes[0].""" - return self.input_shapes[0] - - @property - def output_shape(self): - """Short-hand for output_shapes[0].""" - return self.output_shapes[0] - - @property - def num_inputs(self): - """Number of input tensors.""" - return len(self.input_shapes) - - @property - def num_outputs(self): - """Number of output tensors.""" - return len(self.output_shapes) - - @property - def input_names(self): - """Name string for each input.""" - if self._input_names is None: - self._input_names = [t.name.split("/")[-1].split(":")[0] for t in self.input_templates] - return copy.copy(self._input_names) - - @property - def output_names(self): - """Name string for each output.""" - if self._output_names is None: - self._output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates] - return copy.copy(self._output_names) - - @property - def input_templates(self): - """Input placeholders in the template graph.""" - if self._input_templates is None: - self._init_graph() - assert self._input_templates is not None - return copy.copy(self._input_templates) - - @property - def output_templates(self): - """Output tensors in the template graph.""" - if self._output_templates is None: - self._init_graph() - assert self._output_templates is not None - return copy.copy(self._output_templates) - - @property - def own_vars(self): - """Variables defined by this network (local_name => var), excluding sub-networks.""" - return copy.copy(self._get_own_vars()) - - def _get_own_vars(self): - if self._own_vars is None: - self._init_graph() - assert self._own_vars is not None - return self._own_vars - - @property - def vars(self): - """All variables (local_name => var).""" - 
return copy.copy(self._get_vars()) - - def _get_vars(self): - if self._vars is None: - self._vars = OrderedDict(self._get_own_vars()) - for comp in self._get_components().values(): - self._vars.update((comp.name + "/" + name, var) for name, var in comp._get_vars().items()) - return self._vars - - @property - def trainables(self): - """All trainable variables (local_name => var).""" - return copy.copy(self._get_trainables()) - - def _get_trainables(self): - if self._trainables is None: - self._trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable) - return self._trainables - - @property - def var_global_to_local(self): - """Mapping from variable global names to local names.""" - return copy.copy(self._get_var_global_to_local()) - - def _get_var_global_to_local(self): - if self._var_global_to_local is None: - self._var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items()) - return self._var_global_to_local - - def reset_own_vars(self) -> None: - """Re-initialize all variables of this network, excluding sub-networks.""" - if self._var_inits is None or self._components is None: - tfutil.run([var.initializer for var in self._get_own_vars().values()]) - else: - self._var_inits.clear() - self._all_inits_known = False - - def reset_vars(self) -> None: - """Re-initialize all variables of this network, including sub-networks.""" - if self._var_inits is None: - tfutil.run([var.initializer for var in self._get_vars().values()]) - else: - self._var_inits.clear() - self._all_inits_known = False - if self._components is not None: - for comp in self._components.values(): - comp.reset_vars() - - def reset_trainables(self) -> None: - """Re-initialize all trainable variables of this network, including sub-networks.""" - tfutil.run([var.initializer for var in self._get_trainables().values()]) - - def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]: - """Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s). - The graph is placed on the current TensorFlow device.""" - assert len(in_expr) == self.num_inputs - assert not all(expr is None for expr in in_expr) - self._get_vars() # ensure that all variables have been created - - # Choose build func kwargs. - build_kwargs = dict(self.static_kwargs) - build_kwargs.update(dynamic_kwargs) - build_kwargs["is_template_graph"] = False - build_kwargs["components"] = self._components - - # Build TensorFlow graph to evaluate the network. - with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name): - assert tf.get_variable_scope().name == self.scope - valid_inputs = [expr for expr in in_expr if expr is not None] - final_inputs = [] - for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes): - if expr is not None: - expr = tf.identity(expr, name=name) - else: - expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name) - final_inputs.append(expr) - out_expr = self._build_func(*final_inputs, **build_kwargs) - - # Propagate input shapes back to the user-specified expressions. - for expr, final in zip(in_expr, final_inputs): - if isinstance(expr, tf.Tensor): - expr.set_shape(final.shape) - - # Express outputs in the desired format. 
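# (return_as_list=True normalizes the build func's output to a plain list;
# otherwise it is passed through as-is -- a single expression, or a tuple for
# multi-output networks. Hypothetical sketch, `net`/`latents`/`labels` assumed:
#   imgs = net.get_output_for(latents, labels, return_as_list=True)  # -> [tf.Tensor])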
- assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple) - if return_as_list: - out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr) - return out_expr - - def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str: - """Get the local name of a given variable, without any surrounding name scopes.""" - assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str) - global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name - return self._get_var_global_to_local()[global_name] - - def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression: - """Find variable by local or global name.""" - assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str) - return self._get_vars()[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name - - def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray: - """Get the value of a given variable as NumPy array. - Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible.""" - return self.find_var(var_or_local_name).eval() - - def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None: - """Set the value of a given variable based on the given NumPy array. - Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible.""" - tfutil.set_vars({self.find_var(var_or_local_name): new_value}) - - def __getstate__(self) -> dict: - """Pickle export.""" - state = dict() - state["version"] = 5 - state["name"] = self.name - state["static_kwargs"] = dict(self.static_kwargs) - state["components"] = dict(self.components) - state["build_module_src"] = self._build_module_src - state["build_func_name"] = self._build_func_name - state["variables"] = list(zip(self._get_own_vars().keys(), tfutil.run(list(self._get_own_vars().values())))) - state["input_shapes"] = self.input_shapes - state["output_shapes"] = self.output_shapes - state["input_names"] = self.input_names - state["output_names"] = self.output_names - return state - - def __setstate__(self, state: dict) -> None: - """Pickle import.""" - - # Execute custom import handlers. - for handler in _import_handlers: - state = handler(state) - - # Get basic fields. - assert state["version"] in [2, 3, 4, 5] - name = state["name"] - static_kwargs = state["static_kwargs"] - build_module_src = state["build_module_src"] - build_func_name = state["build_func_name"] - - # Create temporary module from the imported source code. - module_name = "_tflib_network_import_" + uuid.uuid4().hex - module = types.ModuleType(module_name) - sys.modules[module_name] = module - _import_module_src[module] = build_module_src - exec(build_module_src, module.__dict__) # pylint: disable=exec-used - build_func = util.get_obj_from_module(module, build_func_name) - - # Initialize fields. 
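# (Unpickling recreates the object from the imported source: fields are rebuilt
# around the fresh build func, saved variable values are staged as pending inits
# -- graph construction stays deferred -- and cached shape/name metadata is restored.)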
- self._init_fields(name=name, static_kwargs=static_kwargs, build_func=build_func, build_func_name=build_func_name, build_module_src=build_module_src) - self._var_inits.update(copy.deepcopy(state["variables"])) - self._all_inits_known = True - self._components = util.EasyDict(state.get("components", {})) - self._input_shapes = copy.deepcopy(state.get("input_shapes", None)) - self._output_shapes = copy.deepcopy(state.get("output_shapes", None)) - self._input_names = copy.deepcopy(state.get("input_names", None)) - self._output_names = copy.deepcopy(state.get("output_names", None)) - - def clone(self, name: str = None, **new_static_kwargs) -> "Network": - """Create a clone of this network with its own copy of the variables.""" - static_kwargs = dict(self.static_kwargs) - static_kwargs.update(new_static_kwargs) - net = object.__new__(Network) - net._init_fields(name=(name or self.name), static_kwargs=static_kwargs, build_func=self._build_func, build_func_name=self._build_func_name, build_module_src=self._build_module_src) - net.copy_vars_from(self) - return net - - def copy_own_vars_from(self, src_net: "Network") -> None: - """Copy the values of all variables from the given network, excluding sub-networks.""" - - # Source has unknown variables or unknown components => init now. - if (src_net._var_inits is not None and not src_net._all_inits_known) or src_net._components is None: - src_net._get_vars() - - # Both networks are inited => copy directly. - if src_net._var_inits is None and self._var_inits is None: - names = [name for name in self._get_own_vars().keys() if name in src_net._get_own_vars()] - tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names})) - return - - # Read from source. - if src_net._var_inits is None: - value_dict = tfutil.run(src_net._get_own_vars()) - else: - value_dict = src_net._var_inits - - # Write to destination. - if self._var_inits is None: - tfutil.set_vars({self._get_vars()[name]: value for name, value in value_dict.items() if name in self._get_vars()}) - else: - self._var_inits.update(value_dict) - - def copy_vars_from(self, src_net: "Network") -> None: - """Copy the values of all variables from the given network, including sub-networks.""" - - # Source has unknown variables or unknown components => init now. - if (src_net._var_inits is not None and not src_net._all_inits_known) or src_net._components is None: - src_net._get_vars() - - # Source is inited, but destination components have not been created yet => set as initial values. - if src_net._var_inits is None and self._components is None: - self._var_inits.update(tfutil.run(src_net._get_vars())) - return - - # Destination has unknown components => init now. - if self._components is None: - self._get_vars() - - # Both networks are inited => copy directly. - if src_net._var_inits is None and self._var_inits is None: - names = [name for name in self._get_vars().keys() if name in src_net._get_vars()] - tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names})) - return - - # Copy recursively, component by component. 
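# (General case: copy this network's own variables first, then recurse into
# sub-networks that exist under the same name on both sides; unmatched
# components are skipped.)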
- self.copy_own_vars_from(src_net) - for name, src_comp in src_net._components.items(): - if name in self._components: - self._components[name].copy_vars_from(src_comp) - - def copy_trainables_from(self, src_net: "Network") -> None: - """Copy the values of all trainable variables from the given network, including sub-networks.""" - names = [name for name in self._get_trainables().keys() if name in src_net._get_trainables()] - tfutil.set_vars(tfutil.run({self._get_vars()[name]: src_net._get_vars()[name] for name in names})) - - def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network": - """Create a new network with the given parameters, and copy all variables from this network.""" - if new_name is None: - new_name = self.name - static_kwargs = dict(self.static_kwargs) - static_kwargs.update(new_static_kwargs) - net = Network(name=new_name, func_name=new_func_name, **static_kwargs) - net.copy_vars_from(self) - return net - - def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation: - """Construct a TensorFlow op that updates the variables of this network - to be slightly closer to those of the given network.""" - with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"): - ops = [] - for name, var in self._get_vars().items(): - if name in src_net._get_vars(): - cur_beta = beta if var.trainable else beta_nontrainable - new_value = tfutil.lerp(src_net._get_vars()[name], var, cur_beta) - ops.append(var.assign(new_value)) - return tf.group(*ops) - - def run(self, - *in_arrays: Tuple[Union[np.ndarray, None], ...], - input_transform: dict = None, - output_transform: dict = None, - return_as_list: bool = False, - print_progress: bool = False, - minibatch_size: int = None, - num_gpus: int = 1, - assume_frozen: bool = False, - **dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]: - """Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s). - - Args: - input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network. - The dict must contain a 'func' field that points to a top-level function. The function is called with the input - TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs. - output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network. - The dict must contain a 'func' field that points to a top-level function. The function is called with the output - TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs. - return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs. - print_progress: Print progress to the console? Useful for very large input arrays. - minibatch_size: Maximum minibatch size to use, None = disable batching. - num_gpus: Number of GPUs to use. - assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls. - dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
- """ - assert len(in_arrays) == self.num_inputs - assert not all(arr is None for arr in in_arrays) - assert input_transform is None or util.is_top_level_function(input_transform["func"]) - assert output_transform is None or util.is_top_level_function(output_transform["func"]) - output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs) - num_items = in_arrays[0].shape[0] - if minibatch_size is None: - minibatch_size = num_items - - # Construct unique hash key from all arguments that affect the TensorFlow graph. - key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs) - def unwind_key(obj): - if isinstance(obj, dict): - return [(key, unwind_key(value)) for key, value in sorted(obj.items())] - if callable(obj): - return util.get_top_level_function_name(obj) - return obj - key = repr(unwind_key(key)) - - # Build graph. - if key not in self._run_cache: - with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None): - with tf.device("/cpu:0"): - in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names] - in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr])) - - out_split = [] - for gpu in range(num_gpus): - with tf.device(self.device if num_gpus == 1 else "/gpu:%d" % gpu): - net_gpu = self.clone() if assume_frozen else self - in_gpu = in_split[gpu] - - if input_transform is not None: - in_kwargs = dict(input_transform) - in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs) - in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu) - - assert len(in_gpu) == self.num_inputs - out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs) - - if output_transform is not None: - out_kwargs = dict(output_transform) - out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs) - out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu) - - assert len(out_gpu) == self.num_outputs - out_split.append(out_gpu) - - with tf.device("/cpu:0"): - out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)] - self._run_cache[key] = in_expr, out_expr - - # Run minibatches. - in_expr, out_expr = self._run_cache[key] - out_arrays = [np.empty([num_items] + expr.shape.as_list()[1:], expr.dtype.name) for expr in out_expr] - - for mb_begin in range(0, num_items, minibatch_size): - if print_progress: - print("\r%d / %d" % (mb_begin, num_items), end="") - - mb_end = min(mb_begin + minibatch_size, num_items) - mb_num = mb_end - mb_begin - mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)] - mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in))) - - for dst, src in zip(out_arrays, mb_out): - dst[mb_begin: mb_end] = src - - # Done. 
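# (Finish the progress line, then collapse the result: with return_as_list=False
# a single-output network yields one ndarray and a multi-output network a tuple.)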
- if print_progress: - print("\r%d / %d" % (num_items, num_items)) - - if not return_as_list: - out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays) - return out_arrays - - def list_ops(self) -> List[TfExpression]: - _ = self.output_templates # ensure that the template graph has been created - include_prefix = self.scope + "/" - exclude_prefix = include_prefix + "_" - ops = tf.get_default_graph().get_operations() - ops = [op for op in ops if op.name.startswith(include_prefix)] - ops = [op for op in ops if not op.name.startswith(exclude_prefix)] - return ops - - def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]: - """Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to - individual layers of the network. Mainly intended to be used for reporting.""" - layers = [] - - def recurse(scope, parent_ops, parent_vars, level): - if len(parent_ops) == 0 and len(parent_vars) == 0: - return - - # Ignore specific patterns. - if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]): - return - - # Filter ops and vars by scope. - global_prefix = scope + "/" - local_prefix = global_prefix[len(self.scope) + 1:] - cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]] - cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]] - if not cur_ops and not cur_vars: - return - - # Filter out all ops related to variables. - for var in [op for op in cur_ops if op.type.startswith("Variable")]: - var_prefix = var.name + "/" - cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)] - - # Scope does not contain ops as immediate children => recurse deeper. - contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type not in ["Identity", "Cast", "Transpose"] for op in cur_ops) - if (level == 0 or not contains_direct_ops) and (len(cur_ops) != 0 or len(cur_vars) != 0): - visited = set() - for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]: - token = rel_name.split("/")[0] - if token not in visited: - recurse(global_prefix + token, cur_ops, cur_vars, level + 1) - visited.add(token) - return - - # Report layer. 
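# (Leaf scope: record it as one layer, taking the last op's first output -- or
# the last variable when the scope holds no ops -- as its representative output.)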
- layer_name = scope[len(self.scope) + 1:] - layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1] - layer_trainables = [var for _name, var in cur_vars if var.trainable] - layers.append((layer_name, layer_output, layer_trainables)) - - recurse(self.scope, self.list_ops(), list(self._get_vars().items()), 0) - return layers - - def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None: - """Print a summary table of the network structure.""" - rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]] - rows += [["---"] * 4] - total_params = 0 - - for layer_name, layer_output, layer_trainables in self.list_layers(): - num_params = sum(int(np.prod(var.shape.as_list())) for var in layer_trainables) - weights = [var for var in layer_trainables if var.name.endswith("/weight:0")] - weights.sort(key=lambda x: len(x.name)) - if len(weights) == 0 and len(layer_trainables) == 1: - weights = layer_trainables - total_params += num_params - - if not hide_layers_with_no_params or num_params != 0: - num_params_str = str(num_params) if num_params > 0 else "-" - output_shape_str = str(layer_output.shape) - weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-" - rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]] - - rows += [["---"] * 4] - rows += [["Total", str(total_params), "", ""]] - - widths = [max(len(cell) for cell in column) for column in zip(*rows)] - print() - for row in rows: - print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths))) - print() - - def setup_weight_histograms(self, title: str = None) -> None: - """Construct summary ops to include histograms of all trainable parameters in TensorBoard.""" - if title is None: - title = self.name - - with tf.name_scope(None), tf.device(None), tf.control_dependencies(None): - for local_name, var in self._get_trainables().items(): - if "/" in local_name: - p = local_name.split("/") - name = title + "_" + p[-1] + "/" + "_".join(p[:-1]) - else: - name = title + "_toplevel/" + local_name - - tf.summary.histogram(name, var) - -#---------------------------------------------------------------------------- -# Backwards-compatible emulation of legacy output transformation in Network.run(). 
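# (The handler below rewrites the deprecated kwargs into an equivalent
# output_transform dict. Hedged sketch, assuming a Network `net` and latents `z`;
# tflib.convert_images_to_uint8 is the replacement named in the warning below:
#   old: net.run(z, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)
#   new: net.run(z, output_transform=dict(func=tflib.convert_images_to_uint8)))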
- -_print_legacy_warning = True - -def _handle_legacy_output_transforms(output_transform, dynamic_kwargs): - global _print_legacy_warning - legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"] - if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs): - return output_transform, dynamic_kwargs - - if _print_legacy_warning: - _print_legacy_warning = False - print() - print("WARNING: Old-style output transformations in Network.run() are deprecated.") - print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'") - print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.") - print() - assert output_transform is None - - new_kwargs = dict(dynamic_kwargs) - new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs} - new_transform["func"] = _legacy_output_transform_func - return new_transform, new_kwargs - -def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None): - if out_mul != 1.0: - expr = [x * out_mul for x in expr] - - if out_add != 0.0: - expr = [x + out_add for x in expr] - - if out_shrink > 1: - ksize = [1, 1, out_shrink, out_shrink] - expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr] - - if out_dtype is not None: - if tf.as_dtype(out_dtype).is_integer: - expr = [tf.round(x) for x in expr] - expr = [tf.saturate_cast(x, out_dtype) for x in expr] - return expr diff --git a/spaces/DragGan/DragGan/README.md b/spaces/DragGan/DragGan/README.md deleted file mode 100644 index b0556d90ad8a33f607579563e3af2ab468e5728f..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/README.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: DragGan - Drag Your GAN -emoji: 👆🐉 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.35.2 -app_file: visualizer_drag_gradio.py -pinned: false ---- - - -# Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold - -https://arxiv.org/abs/2305.10973 -https://huggingface.co/DragGan/DragGan-Models - -

-<!-- demo images omitted -->
-
-**Figure:** *Drag your GAN.*
-
-> **Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold**
      -> Xingang Pan, Ayush Tewari, Thomas Leimkühler, Lingjie Liu, Abhimitra Meka, Christian Theobalt
-> *SIGGRAPH 2023 Conference Proceedings* - -## Requirements - -Please follow the requirements of [https://github.com/NVlabs/stylegan3](https://github.com/NVlabs/stylegan3). - -## Download pre-trained StyleGAN2 weights - -To download pre-trained weights, simply run: -```sh -sh scripts/download_model.sh -``` -If you want to try StyleGAN-Human and the Landscapes HQ (LHQ) dataset, please download weights from these links: [StyleGAN-Human](https://drive.google.com/file/d/1dlFEHbu-WzQWJl7nBBZYcTyo000H9hVm/view?usp=sharing), [LHQ](https://drive.google.com/file/d/16twEf0T9QINAEoMsWefoWiyhcTd-aiWc/view?usp=sharing), and put them under `./checkpoints`. - -Feel free to try other pretrained StyleGAN models. - -## Run DragGAN GUI - -To start the DragGAN GUI, simply run: -```sh -sh scripts/gui.sh -``` - -This GUI supports editing GAN-generated images. To edit a real image, you need to first perform GAN inversion using tools like [PTI](https://github.com/danielroich/PTI). Then load the new latent code and model weights into the GUI. - -You can run the DragGAN Gradio demo as well: -```sh -python visualizer_drag_gradio.py -``` - -## Acknowledgement - -This code is developed based on [StyleGAN3](https://github.com/NVlabs/stylegan3). Part of the code is borrowed from [StyleGAN-Human](https://github.com/stylegan-human/StyleGAN-Human). - -## License - -The code related to the DragGAN algorithm is licensed under [CC-BY-NC](https://creativecommons.org/licenses/by-nc/4.0/). -However, most of this project is available under separate license terms: all code used or modified from [StyleGAN3](https://github.com/NVlabs/stylegan3) is under the [Nvidia Source Code License](https://github.com/NVlabs/stylegan3/blob/main/LICENSE.txt). - -Any use or derivative of this code must preserve the watermarking functionality. - -## BibTeX - -```bibtex -@inproceedings{pan2023draggan, - title={Drag Your GAN: Interactive Point-based Manipulation on the Generative Image Manifold}, - author={Pan, Xingang and Tewari, Ayush and Leimk{\"u}hler, Thomas and Liu, Lingjie and Meka, Abhimitra and Theobalt, Christian}, - booktitle = {ACM SIGGRAPH 2023 Conference Proceedings}, - year={2023} -} -``` diff --git a/spaces/DragGan/DragGan/stylegan_human/edit/edit_config.py b/spaces/DragGan/DragGan/stylegan_human/edit/edit_config.py deleted file mode 100644 index e5c6c5fc234c696fb161874f972ca2b83ec9896a..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/edit/edit_config.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved.
- -attr_dict = dict( - interface_gan={ # strength - 'upper_length': [-1], # strength: negative for shorter, positive for longer - 'bottom_length': [1] - }, - stylespace={ # layer, strength, threshold - 'upper_length': [5, -5, 0.0028], # strength: negative for shorter, positive for longer - 'bottom_length': [3, 5, 0.003] - }, - sefa={ # layer, strength - 'upper_length': [[4, 5, 6, 7], 5], #-5 # strength: negative for longer, positive for shorter - 'bottom_length': [[4, 5, 6, 7], 5] - } -) \ No newline at end of file diff --git a/spaces/Dragonnnext/Drago-Proxy/greeting.md b/spaces/Dragonnnext/Drago-Proxy/greeting.md deleted file mode 100644 index d06b0426d609d2d786d5bdd28dff77e6749536ec..0000000000000000000000000000000000000000 --- a/spaces/Dragonnnext/Drago-Proxy/greeting.md +++ /dev/null @@ -1,11 +0,0 @@ -**THIS PROXY IS PRIVATE, USED ONLY BY ME TO TEST KEYS OR COOM MYSELF. USE THE UNICORN (TURBO) ONE (*THIS ONE WILL NEVER BE PUBLIC*)** - - https://huggingface.co/spaces/Dragonnext/Unicorn-proxy - -Contact me: -contactdrago@proton.me - -My private bots, not promising good results (feel free to share the rentry): -https://rentry.co/dragobots - -![cute](https://files.catbox.moe/nuxjp4.png) \ No newline at end of file diff --git a/spaces/Duckymalone/dreamlike-art-dreamlike-diffusion-1.0/app.py b/spaces/Duckymalone/dreamlike-art-dreamlike-diffusion-1.0/app.py deleted file mode 100644 index 26e036ff2e92bfa549428082790db4acf5d94844..0000000000000000000000000000000000000000 --- a/spaces/Duckymalone/dreamlike-art-dreamlike-diffusion-1.0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/dreamlike-art/dreamlike-diffusion-1.0").launch() \ No newline at end of file diff --git a/spaces/Endream/test/README.md b/spaces/Endream/test/README.md deleted file mode 100644 index e57025c53bc82ab0e797efdb0860db868c9a18e5..0000000000000000000000000000000000000000 --- a/spaces/Endream/test/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Test -emoji: 🐠 -colorFrom: gray -colorTo: gray -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Epoching/DocumentQA/README.md b/spaces/Epoching/DocumentQA/README.md deleted file mode 100644 index da22636c00fb195f59928e6b80d3d55a74f3a8c9..0000000000000000000000000000000000000000 --- a/spaces/Epoching/DocumentQA/README.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Detect Retrieve Comprehend -emoji: 📚 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.1.3 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Release - ---- - -**Detect, Retrieve, Comprehend** is distributed under the terms of the Apache 2.0 license with LLVM exception. - -See [LICENSE]() and [NOTICE]() for details.
- -SPDX-License-Identifier: Apache-2.0-with-LLVM-exception - -LLNL-CODE-838964 \ No newline at end of file diff --git a/spaces/EuroPython2022/BayesCap/utils.py b/spaces/EuroPython2022/BayesCap/utils.py deleted file mode 100644 index 2566a6d3b164e907347e120f942ac2bed23a29d5..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/BayesCap/utils.py +++ /dev/null @@ -1,117 +0,0 @@ -import random -from typing import Any, Optional -import numpy as np -import os -import cv2 -from glob import glob -from PIL import Image, ImageDraw -from tqdm import tqdm -import kornia -import matplotlib.pyplot as plt -import seaborn as sns -import albumentations as albu -import functools -import math - -import torch -import torch.nn as nn -from torch import Tensor -import torchvision as tv -import torchvision.models as models -from torchvision import transforms -from torchvision.transforms import functional as F -from losses import TempCombLoss - - -######## for loading checkpoint from googledrive -google_drive_paths = { - "BayesCap_SRGAN.pth": "https://drive.google.com/uc?id=1d_5j1f8-vN79htZTfRUqP1ddHZIYsNvL", - "BayesCap_ckpt.pth": "https://drive.google.com/uc?id=1Vg1r6gKgQ1J3M51n6BeKXYS8auT9NhA9", -} - -def ensure_checkpoint_exists(model_weights_filename): - if not os.path.isfile(model_weights_filename) and ( - model_weights_filename in google_drive_paths - ): - gdrive_url = google_drive_paths[model_weights_filename] - try: - from gdown import download as drive_download - - drive_download(gdrive_url, model_weights_filename, quiet=False) - except ModuleNotFoundError: - print( - "gdown module not found.", - "pip3 install gdown or, manually download the checkpoint file:", - gdrive_url - ) - - if not os.path.isfile(model_weights_filename) and ( - model_weights_filename not in google_drive_paths - ): - print( - model_weights_filename, - " not found, you may need to manually download the model weights." - ) - -def normalize(image: np.ndarray) -> np.ndarray: - """Normalize the ``OpenCV.imread`` or ``skimage.io.imread`` data. - Args: - image (np.ndarray): The image data read by ``OpenCV.imread`` or ``skimage.io.imread``. - Returns: - Normalized image data. Data range [0, 1]. - """ - return image.astype(np.float64) / 255.0 - - -def unnormalize(image: np.ndarray) -> np.ndarray: - """Un-normalize the ``OpenCV.imread`` or ``skimage.io.imread`` data. - Args: - image (np.ndarray): The image data read by ``OpenCV.imread`` or ``skimage.io.imread``. - Returns: - Denormalized image data. Data range [0, 255]. - """ - return image.astype(np.float64) * 255.0 - - -def image2tensor(image: np.ndarray, range_norm: bool, half: bool) -> torch.Tensor: - """Convert ``PIL.Image`` to Tensor. - Args: - image (np.ndarray): The image data read by ``PIL.Image`` - range_norm (bool): Scale [0, 1] data to between [-1, 1] - half (bool): Whether to convert torch.float32 similarly to torch.half type. - Returns: - Normalized image data - Examples: - >>> image = Image.open("image.bmp") - >>> tensor_image = image2tensor(image, range_norm=False, half=False) - """ - tensor = F.to_tensor(image) - - if range_norm: - tensor = tensor.mul_(2.0).sub_(1.0) - if half: - tensor = tensor.half() - - return tensor - - -def tensor2image(tensor: torch.Tensor, range_norm: bool, half: bool) -> Any: - """Converts ``torch.Tensor`` to ``PIL.Image``. 
- Args: - tensor (torch.Tensor): The image that needs to be converted to ``PIL.Image`` - range_norm (bool): Scale [-1, 1] data to between [0, 1] - half (bool): Whether to convert torch.float32 similarly to torch.half type. - Returns: - Convert image data to support PIL library - Examples: - >>> tensor = torch.randn([1, 3, 128, 128]) - >>> image = tensor2image(tensor, range_norm=False, half=False) - """ - if range_norm: - tensor = tensor.add_(1.0).div_(2.0) - if half: - tensor = tensor.half() - - image = tensor.squeeze_(0).permute(1, 2, 0).mul_(255).clamp_(0, 255).cpu().numpy().astype("uint8") - - return image diff --git a/spaces/EuroPython2022/clickbaitonator/fudge/eval_formality_metrics.py b/spaces/EuroPython2022/clickbaitonator/fudge/eval_formality_metrics.py deleted file mode 100644 index 972a5f46f6207f1aa1e0a8833452738c68d404ad..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/clickbaitonator/fudge/eval_formality_metrics.py +++ /dev/null @@ -1,73 +0,0 @@ -from argparse import ArgumentParser -import pickle -import os -import math - -import sacrebleu -import numpy as np -import torch -from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline, set_seed, GPT2Tokenizer, GPT2Model, MarianTokenizer, MarianMTModel - -from constants import * -from model import Model -from util import save_checkpoint, ProgressMeter, AverageMeter, num_params - -def avg_formality(preds, model, tokenizer, device='cuda'): - probs = [] - for sent in preds: - encoded_input = tokenizer.encode(sent, return_tensors='pt').to(device) - lengths = torch.LongTensor([encoded_input.shape[1]]).to(device) - scores = model(encoded_input, lengths=lengths) # batch x seq - score = scores.flatten()[-1].item() - probs.append(math.exp(score) / (1 + math.exp(score))) # sigmoided score = prob - return np.mean(probs) - -if __name__=='__main__': - parser = ArgumentParser() - parser.add_argument('--pred', type=str) - parser.add_argument('--ref', type=str, nargs='*', help='bleu refs') - parser.add_argument('--ckpt', type=str, help='formality classifier') - parser.add_argument('--dataset_info', type=str) - parser.add_argument('--device', type=str, default='cuda', choices=['cpu', 'cuda']) - parser.add_argument('--model_string', type=str, default='Helsinki-NLP/opus-mt-es-en') - - args = parser.parse_args() - - # refs = [['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'], - # ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.']] - # sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.'] - print('num ref files', len(args.ref)) - pred = [] - with open(args.pred, 'r') as rf: - for line in rf: - pred.append(line.strip()) - refs = [] - for ref_file in args.ref: - ref = [] - with open(ref_file, 'r') as rf: - for line in rf: - ref.append(line.strip()) - assert len(ref) == len(pred) - refs.append(ref) - bleu = sacrebleu.corpus_bleu(pred, refs) - print('BLEU score:', bleu.score) - - with open(args.dataset_info, 'rb') as rf: - dataset_info = pickle.load(rf) - - tokenizer = MarianTokenizer.from_pretrained(args.model_string) - tokenizer.add_special_tokens({'pad_token': PAD_TOKEN}) - pad_id = tokenizer.encode(PAD_TOKEN)[0] - - checkpoint = torch.load(args.ckpt, map_location=args.device) - model_args = checkpoint['args'] - conditioning_model = Model(model_args, pad_id, len(dataset_info.index2word)) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway - 
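# (Standard PyTorch restore-for-inference pattern follows: load the saved
# weights, move the module to the target device, and call .eval() so scoring
# runs deterministically.)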
conditioning_model.load_state_dict(checkpoint['state_dict']) - conditioning_model = conditioning_model.to(args.device) - conditioning_model.eval() - print("=> loaded checkpoint '{}' (epoch {})" - .format(args.ckpt, checkpoint['epoch'])) - print('num params', num_params(conditioning_model)) - - print('avg formality prob according to model', avg_formality(pred, conditioning_model, tokenizer, device=args.device)) - diff --git a/spaces/Faridmaruf/rvc-genshin-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/Faridmaruf/rvc-genshin-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/Faridmaruf/rvc-genshin-v2/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/Felix123456/bingo/src/components/tailwind-indicator.tsx b/spaces/Felix123456/bingo/src/components/tailwind-indicator.tsx deleted file mode 100644 index f2a1291213dd67055fcebe67fab574c8441338df..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/components/tailwind-indicator.tsx +++ /dev/null @@ -1,14 +0,0 @@ -export function TailwindIndicator() { - if (process.env.NODE_ENV === 'production') return null - - return ( -
      -
-    <div className="fixed bottom-1 left-1 z-50 flex h-6 w-6 items-center justify-center rounded-full bg-gray-800 p-3 font-mono text-xs text-white">
-      <div className="block sm:hidden">xs</div>
-      <div className="hidden sm:block md:hidden">sm</div>
-      <div className="hidden md:block lg:hidden">md</div>
-      <div className="hidden lg:block xl:hidden">lg</div>
-      <div className="hidden xl:block 2xl:hidden">xl</div>
-      <div className="hidden 2xl:block">2xl</div>
-    </div>
      - ) -} diff --git a/spaces/Felix123456/bingo/tailwind.config.js b/spaces/Felix123456/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/FluxWaveCorp/Ghostwriter-Bloom/templates/Templates.py b/spaces/FluxWaveCorp/Ghostwriter-Bloom/templates/Templates.py deleted file mode 100644 index f92713ae4328550b05c8c48e73c3919efa2f1d3f..0000000000000000000000000000000000000000 --- a/spaces/FluxWaveCorp/Ghostwriter-Bloom/templates/Templates.py +++ /dev/null @@ -1,64 +0,0 @@ - -class PromptTemplate: - TOPIC_TO_ABSTRACST = ''' - Write an abstract of a scientific paper based on these topics and keywords. - \n\n\n - ### - \n\n\n - topics: \n - Recommender systems\n - Filtering\n - Electronic commerce\n - abstract: With the collaborative filtering techniques becoming more and more mature, recommender systems are widely used nowadays, especially in electronic commerce and social networks. However, the utilization of recommender systems in academic research itself has not received enough attention. A research paper recommender system would greatly help researchers to find the most desirable papers in their fields of endeavour. Due to the textual nature of papers, content information could be integrated into existed recommendation methods. In this paper, we proposed that by using topic model techniques to make topic analysis on research papers, we could introduce a thematic similarity measurement into a modified version of the item-based recommendation approach. This novel recommendation method could considerably alleviate the cold start problem in research paper recommendations. Our experiment result shows that our approach could recommend highly relevant research papers. 
- \n\n\n - ### - \n\n\n - topics: \n - Corona\n - Loss measurement\n - Rain\n - abstract: Transmission line corona loss research is one of the critical technologies of 1000 kV UHV transmission and transformation engineering. Transmission line corona loss relates to many factors of climate, and rainfall rate affects corona loss distinctly very much. By means of UHV AC single circuit test line, corona loss monitoring and UHV corona cage conductor corona loss measurement, this paper researched the effect of rainfall rates on UHV AC single-circuit transmission line's corona loss. This paper applied a corona loss monitoring system to monitor corona loss of UHV single circuit test line and obtained corona loss monitoring data in light rain, moderate rain, and heavy rain conditions. Analyze corona loss test results. Corona loss values in light rain, moderate rain, and heavy rain conditions are obtained. The results show that rain has an obvious influence on corona loss of the line, at the beginning of rain, corona loss increases quickly, after rain, corona loss decreases with the drying of the conductor. The decay time is related to surface field strength, wind speed, air humidity, temperature and other factors. When the rainfall rate is low, corona loss increases fast with the increase of the rainfall rate. With the increase in the rainfall rate, corona loss grows slowly. When the rainfall rate increases to some level, corona loss stop growing. - \n\n\n - ### - \n\n\n - topic: \n - Machine learning\n - Data models\n - Stock markets\n - Testing\n - Machine learning algorithms\n - abstract: In this paper, we provide a comparative analysis sheet of the main publicly-available benchmark data sets for stock markets statistical modelling statistical testing, providing a common language for people from different academic fields to discuss technical issues more conduct conductively. In order to more facilitate the exchange of research results of more people, this article will continue to keep up with time, and follow-up for a comprehensive study of the data model characteristics and data features of these data sets, providing a framework for statistical testing techniques and machine learning algorithms related activities of more practitioners. - \n\n\n - ### - \n\n\n - topic: \n - Organizations\n - Business intelligence\n - Warehousing\n - Data mining\n - Decision making\n - Databases\n - Industries\n - abstract: We need an effective and reliable arrangement of data and, with the foundations of statistical and business intelligence, data mining and warehousing, companies with the increased amount of data will be able to decrease uncertainty and make the best decision. In this respect, our paper overviews the concept, technology and technology trends of business intelligence, data mining and warehouse from an aspect of the infrastructure feature of the industry, classified as production planning for improving operational efficiency. In order to evaluate the benefits of using business intelligence, this paper examine some enterprises utilizing data mining and business intelligence - \n\n\n - ''' - - TITLE_TO_ABSTRACST = ''' - write an abstract of a scientific paper using the title of the paper - \n\n\n - ### - \n\n\n - title: Research paper recommendation with topic analysis. - \n - abstract: With the collaborative filtering techniques becoming more and more mature, recommender systems are widely used nowadays, especially in electronic commerce and social networks. 
However, the utilization of recommender systems in academic research itself has not received enough attention. A research paper recommender system would greatly help researchers to find the most desirable papers in their fields of endeavour. Due to the textual nature of papers, content information could be integrated into existed recommendation methods. In this paper, we proposed that by using topic model techniques to make topic analysis on research papers, we could introduce a thematic similarity measurement into a modified version of the item-based recommendation approach. This novel recommendation method could considerably alleviate the cold start problem in research paper recommendations. Our experiment result shows that our approach could recommend highly relevant research papers.\n\n\n - ### - \n\n\n - title: UHV AC corona loss measurement and analysis under the rain.\nabstract: Transmission line corona loss research is one of the critical technologies of 1000 kV UHV transmission and transformation engineering. Transmission line corona loss relates to many factors of climate, and rainfall rate affects corona loss distinctly very much. By means of UHV AC single circuit test line, corona loss monitoring and UHV corona cage conductor corona loss measurement, this paper researched the effect of rainfall rates on UHV AC single-circuit transmission lines corona loss. This paper applied a corona loss monitoring system to monitor corona loss of UHV single circuit test line and obtained corona loss monitoring data in light rain, moderate rain, and heavy rain conditions. Analyze corona loss test results. Corona loss values in light rain, moderate rain, and heavy rain conditions are obtained. The results show that rain has an obvious influence on corona loss of the line, at the beginning of rain, corona loss increases quickly, after rain, corona loss decreases with the drying of the conductor. The decay time is related to surface field strength, wind speed, air humidity, temperature and other factors. When the rainfall rate is low, corona loss increases fast with the increase of the rainfall rate. With the increase in the rainfall rate, corona loss grows slowly. When the rainfall rate increases to some level, corona loss stop growing. - \n\n\n - ### - \n\n\n - title: Cingulate circuits are associated with escalation of heroin use and naloxone-induced increases in heroin self-administration - \n - abstract: Opioid use disorder (OUD) is defined as a compulsion to seek and take opioids, loss of control over intake and the development of a negative emotional state when access to opioids is denied. Using functional magnetic resonance imaging (fMRI) data in a rat model of OUD, we demonstrate that the escalation of heroin self-administration (SA) and the increased heroin SA following an injection of an opioid receptor antagonist (naloxone) are associated with changes in distinct brain circuits, centered on the cingulate cortex (Cg). Here, SA escalation score was negatively associated with changes in resting state functional connectivity (rsFC) between the Cg and the dorsal striatum. Conversely, increased heroin SA following naloxone injection, was associated with increased connectivity between the Cg and the extended amygdala and hypothalamus. Naloxone-induced increased SA was also positively associated with changes in the amplitude of low frequency fluctuations within the Cg, a measure of spontaneous neuronal activity. 
diff --git a/spaces/FridaZuley/RVC_HFKawaii/lib/infer_pack/models.py b/spaces/FridaZuley/RVC_HFKawaii/lib/infer_pack/models.py deleted file mode 100644 index ec107476df968e51aafc6c3d102a9ed8c53f141a..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/lib/infer_pack/models.py +++ /dev/null @@ -1,1144 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -import numpy as np - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch is None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch is None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths,
x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if 
gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - if uv.device.type == "privateuseone": # for DirectML - uv = uv.float() - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, 
mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 
1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = 
commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, 
m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class 
SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - 
y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = spectral_norm if use_spectral_norm else weight_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = spectral_norm if use_spectral_norm else weight_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap \ No newline at end of file
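Before the next file, a self-contained sketch of the 1d-to-2d reshape that DiscriminatorP performs above: reflect-pad the waveform so its length is a multiple of the period, then fold the time axis so the (kernel_size, 1) convolutions relate samples one period apart. The helper name and shapes here are illustrative only:

import torch
import torch.nn.functional as F

def to_period_2d(x: torch.Tensor, period: int) -> torch.Tensor:
    # x: (batch, channels, time)
    b, c, t = x.shape
    if t % period != 0:  # pad first, as in DiscriminatorP.forward
        x = F.pad(x, (0, period - t % period), "reflect")
        t = x.shape[-1]
    # rows are consecutive chunks of length `period`; convolving over
    # dim 2 with a (k, 1) kernel compares samples one period apart
    return x.view(b, c, t // period, period)

print(to_period_2d(torch.randn(1, 1, 22050), period=5).shape)  # torch.Size([1, 1, 4410, 5])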
diff --git a/spaces/FridaZuley/RVC_HFKawaii/tensorlowest.py b/spaces/FridaZuley/RVC_HFKawaii/tensorlowest.py deleted file mode 100644 index eccd4dbf3494434e59f7defaae6ab91797263b90..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/tensorlowest.py +++ /dev/null @@ -1,123 +0,0 @@ -from tensorboard.backend.event_processing import event_accumulator - -import os -from shutil import copy2 -from re import search as RSearch -import pandas as pd -from ast import literal_eval as LEval - -weights_dir = 'weights/' - -def find_biggest_tensorboard(tensordir): - try: - files = [f for f in os.listdir(tensordir) if f.endswith('.0')] - if not files: - print("No files with the '.0' extension found!") - return - - max_size = 0 - biggest_file = "" - - for file in files: - file_path = os.path.join(tensordir, file) - if os.path.isfile(file_path): - file_size = os.path.getsize(file_path) - if file_size > max_size: - max_size = file_size - biggest_file = file - - return biggest_file - - except FileNotFoundError: - print("Couldn't find your model!") - return - -def main(model_name, save_freq, lastmdls): - global lowestval_weight_dir, scl - - tensordir = os.path.join('logs', model_name) - lowestval_weight_dir = os.path.join(tensordir, "lowestvals") - - latest_file = find_biggest_tensorboard(tensordir) - - if latest_file is None: - print("Couldn't find a valid tensorboard file!") - return - - tfile = os.path.join(tensordir, latest_file) - - ea = event_accumulator.EventAccumulator(tfile, - size_guidance={ - event_accumulator.COMPRESSED_HISTOGRAMS: 500, - event_accumulator.IMAGES: 4, - event_accumulator.AUDIO: 4, - event_accumulator.SCALARS: 0, - event_accumulator.HISTOGRAMS: 1, - }) - - ea.Reload() - ea.Tags() - - scl = ea.Scalars('loss/g/total') - - listwstep = {} - - for val in scl: - if (val.step // save_freq) * save_freq in [val.step for val in scl]: - listwstep[float(val.value)] = (val.step // save_freq) * save_freq - - lowest_vals = sorted(listwstep.keys())[:lastmdls] - - sorted_dict = {value: step for value, step in listwstep.items() if value in lowest_vals} - - return sorted_dict - -def selectweights(model_name, file_dict, weights_dir, lowestval_weight_dir): - os.makedirs(lowestval_weight_dir, exist_ok=True) - logdir = [] - files = [] - lbldict = { - 'Values': {}, - 'Names': {} - } - weights_dir_path = os.path.join(weights_dir, "") - low_val_path = os.path.join(os.getcwd(), os.path.join(lowestval_weight_dir, "")) - - try: - file_dict = LEval(file_dict) - except Exception as e: - print(f"Error! {e}") - return f"Couldn't load tensorboard file!
{e}" - - weights = [f for f in os.scandir(weights_dir)] - for key, value in file_dict.items(): - pattern = fr"^{model_name}_.*_s{value}\.pth$" - matching_weights = [f.name for f in weights if f.is_file() and RSearch(pattern, f.name)] - for weight in matching_weights: - source_path = weights_dir_path + weight - destination_path = os.path.join(lowestval_weight_dir, weight) - - copy2(source_path, destination_path) - - logdir.append(f"File = {weight} Value: {key}, Step: {value}") - - lbldict['Names'][weight] = weight - lbldict['Values'][weight] = key - - files.append(low_val_path + weight) - - print(f"File = {weight} Value: {key}, Step: {value}") - - yield ('\n'.join(logdir), files, pd.DataFrame(lbldict)) - - - return ''.join(logdir), files, pd.DataFrame(lbldict) - - -if __name__ == "__main__": - model = str(input("Enter the name of the model: ")) - sav_freq = int(input("Enter save frequency of the model: ")) - last_mdls = int(input("Enter how many lowest-loss checkpoints to keep: ")) - ds = main(model, sav_freq, last_mdls) # main() requires all three arguments - - if ds: - # selectweights() expects the dict as its string literal and is a - # generator, so stringify the result and drain it for its side effects - for logs, files, df in selectweights(model, str(ds), weights_dir, lowestval_weight_dir): - print(logs) - \ No newline at end of file
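A condensed, standalone sketch of the checkpoint-selection idea implemented above. The log path, scalar tag and save frequency are illustrative; only the EventAccumulator calls already used in the file are assumed:

from tensorboard.backend.event_processing import event_accumulator

ea = event_accumulator.EventAccumulator("logs/mymodel",
                                        size_guidance={event_accumulator.SCALARS: 0})
ea.Reload()
events = ea.Scalars("loss/g/total")  # each ScalarEvent carries .step and .value
save_freq = 50
# keep the three steps with the lowest generator loss, snapped to the save frequency
for e in sorted(events, key=lambda ev: ev.value)[:3]:
    print(f"step {(e.step // save_freq) * save_freq}: loss {e.value:.4f}")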
diff --git "a/spaces/Frorozcol/financIA/pages/1_\360\237\223\212Analisis_exploratorio.py" "b/spaces/Frorozcol/financIA/pages/1_\360\237\223\212Analisis_exploratorio.py" deleted file mode 100644 index 73d7a66a282248c69b0b1a76bbb5b435169fca97..0000000000000000000000000000000000000000 --- "a/spaces/Frorozcol/financIA/pages/1_\360\237\223\212Analisis_exploratorio.py" +++ /dev/null @@ -1,12 +0,0 @@ -import os -import streamlit as st -import streamlit.components.v1 as components - -def main(): - st.title("Análisis exploratorio") - with open('html/0-1 Pysentimiento.html', 'r', encoding='utf-8') as file: - markdown_text = file.read() - components.html(markdown_text, height=1000, scrolling=True) - - -main() \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_bridge.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_bridge.py deleted file mode 100644 index e7153210fa5682badfbf73836823169411fb87fc..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_bridge.py +++ /dev/null @@ -1,81 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils - -class BuildBridge(Task): - """Construct a bridge using four yellow blocks and one long blue block. - First, place pairs of yellow blocks on the two bases, parallel to each other with a fair amount of space in between. - Then, place the blue block horizontally on top of the yellow blocks.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "build a bridge using four yellow blocks and one long blue block" - self.task_completed_desc = "done building bridge." - - def reset(self, env): - super().reset(env) - - # Add yellow blocks. - base_length = 0.04 - base_size = (base_length, base_length, base_length) - base_block_urdf = "box/box-template.urdf" - bridge_pose = ((0.5, 0.0, 0.0), (0, 0, 0, 1)) # fixed pose - self.add_corner_anchor_for_pose(env, bridge_pose) - - base_block_urdf = self.fill_template(base_block_urdf, {'DIM': base_size}) - anchor_base_poses = [(utils.apply(bridge_pose, (- 3 * base_length / 2, 0, 0.001)), bridge_pose[1]), - (utils.apply(bridge_pose, ( 3 * base_length / 2, 0, 0.001)), bridge_pose[1]), - (utils.apply(bridge_pose, (- 3 * base_length / 2, 0, 0.041)), bridge_pose[1]), - (utils.apply(bridge_pose, ( 3 * base_length / 2, 0, 0.041)), bridge_pose[1])] - base_blocks = [] - - for idx in range(4): - base_block_pose = self.get_random_pose(env, base_size) - base_block_id = env.add_object(base_block_urdf, base_block_pose, color=utils.COLORS['yellow']) - base_blocks.append(base_block_id) - - # Add the long blue deck block. - body_size = (0.12, 0.04, 0.02) # x, y, z dimensions for the asset size - body_block_urdf = "box/box-template.urdf" - body_block_urdf = self.fill_template(body_block_urdf, {'DIM': body_size}) - body_block_pose = self.get_random_pose(env, body_size) - body_block_id = env.add_object(body_block_urdf, body_block_pose, color=utils.COLORS['blue']) - anchor_body_poses = [bridge_pose] - - # Goal: first, place two yellow blocks on the two bases, parallel to each other. - self.add_goal(objs=base_blocks[:2], - matches=np.ones((2, 2)), - targ_poses=anchor_base_poses, - replace=False, - rotations=True, - metric='pose', - params=None, - step_max_reward=1./4, - language_goal="Firstly, place the two yellow blocks on each of the two bases parallel to each other with a fair amount of space in between.") - - self.add_goal(objs=base_blocks[2:], - matches=np.ones((2, 2)), - targ_poses=anchor_base_poses, - replace=False, - rotations=True, - metric='pose', - params=None, - step_max_reward=1./2, - language_goal="Place the two yellow blocks on each of the two bases parallel to each other with a fair amount of space in between.") - - # Then, place the blue deck block horizontally across the yellow blocks. - self.add_goal(objs=[body_block_id], - matches=np.ones((1, 1)), - targ_poses=anchor_body_poses, - replace=False, - rotations=True, - metric='pose', - params=None, - step_max_reward=1./4, - language_goal="Then, place the blue block horizontally on top of the yellow blocks.") \ No newline at end of file diff --git a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/latex/attention/background.tex b/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/latex/attention/background.tex deleted file mode 100644 index 785069dc0f9143bad24e640056dd1072d5c6e5b5..0000000000000000000000000000000000000000 --- a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/latex/attention/background.tex +++ /dev/null @@ -1,58 +0,0 @@ -The goal of reducing sequential computation also forms the foundation of the Extended Neural GPU \citep{extendedngpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as their basic building block, computing hidden representations in parallel for all input and output positions. In these models, the number of operations required to relate signals from two arbitrary input or output positions grows with the distance between positions, linearly for ConvS2S and logarithmically for ByteNet. This makes it more difficult to learn dependencies between distant positions \citep{hochreiter2001gradient}.
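As a rough worked example, relating two positions $n$ apart takes a stack of $O(n/k)$ convolutional layers of kernel width $k$ (or $O(\log_k n)$ layers with ByteNet's dilated convolutions), so the path a learning signal must traverse grows with the sequence length.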
In the Transformer this is reduced to a constant number of operations, albeit at the cost of reduced effective resolution due to averaging attention-weighted positions, an effect we counteract with Multi-Head Attention as described in section~\ref{sec:attention}. - -Self-attention, sometimes called intra-attention is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence. Self-attention has been used successfully in a variety of tasks including reading comprehension, abstractive summarization, textual entailment and learning task-independent sentence representations \citep{cheng2016long, decomposableAttnModel, paulus2017deep, lin2017structured}. - -End-to-end memory networks are based on a recurrent attention mechanism instead of sequence-aligned recurrence and have been shown to perform well on simple-language question answering and language modeling tasks \citep{sukhbaatar2015}. - -To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution. -In the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as \citep{neural_gpu, NalBytenet2017} and \citep{JonasFaceNet2017}. - - -%\citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs. - -%For example,! in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at low computation cost, making it an essential ingredient in competitive recurrent models for machine translation. - -%A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture. - -%After the seminal models introduced in \citep{sutskever14, bahdanau2014neural, cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation (MT) and language modeling with recurrent endoder-decoder and recurrent language models. Recent effort \citep{shazeer2017outrageously} has successfully combined the power of conditional computation with sequence models to train very large models for MT, pushing SOTA at lower computational cost. - -%Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state precludes processing all timesteps at once, instead requiring long sequences of sequential operations. 
In practice, this results in greatly reduced computational efficiency, as on modern computing hardware, a single operation on a large batch is much faster than a large number of operations on small batches. The problem gets worse at longer sequence lengths. Although sequential computation is not a severe bottleneck at inference time, as autoregressively generating each output requires all previous outputs, the inability to compute scores at all output positions at once hinders us from rapidly training our models over large datasets. Although impressive work such as \citep{Kuchaiev2017Factorization} is able to significantly accelerate the training of LSTMs with factorization tricks, we are still bound by the linear dependence on sequence length. - -%If the model could compute hidden states at each time step using only the inputs and outputs, it would be liberated from the dependence on results from previous time steps during training. This line of thought is the foundation of recent efforts such as the Markovian neural GPU \citep{neural_gpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a building block to compute hidden representations simultaneously for all timesteps, resulting in $O(1)$ sequential time complexity. \citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs. - -%A crucial component for accurate sequence prediction is modeling cross-positional communication. For example, in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at a low computation cost, also $O(1)$ sequential time complexity, making it an essential ingredient in recurrent encoder-decoder architectures for MT. A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture. - - - -%Note: Facebook model is no better than RNNs in this regard, since it requires a number of layers proportional to the distance you want to communicate. Bytenet is more promising, since it requires a logarithmnic number of layers (does bytenet have SOTA results)? - -%Note: An attention layer can connect a very large number of positions at a low computation cost in O(1) sequential operations. This is why encoder-decoder attention has been so successful in seq-to-seq models so far. It is only natural, then, to also use attention to connect the timesteps of the same sequence. - -%Note: I wouldn't say that long sequences are not a problem during inference. It would be great if we could infer with no long sequences. We could just say later on that, while our training graph is constant-depth, our model still requires sequential operations in the decoder part during inference due to the autoregressive nature of the model. - -%\begin{table}[h!] 
-%\caption{Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth. $n$ represents the sequence length and $d$ represents the channel depth.} -%\label{tab:op_complexities} -%\begin{center} -%\vspace{-5pt} -%\scalebox{0.75}{ - -%\begin{tabular}{l|c|c|c} -%\hline \hline -%Layer Type & Receptive & Complexity & Sequential \\ -% & Field & & Operations \\ -%\hline -%Pointwise Feed-Forward & $1$ & $O(n \cdot d^2)$ & $O(1)$ \\ -%\hline -%Recurrent & $n$ & $O(n \cdot d^2)$ & $O(n)$ \\ -%\hline -%Convolutional & $r$ & $O(r \cdot n \cdot d^2)$ & $O(1)$ \\ -%\hline -%Convolutional (separable) & $r$ & $O(r \cdot n \cdot d + n %\cdot d^2)$ & $O(1)$ \\ -%\hline -%Attention & $r$ & $O(r \cdot n \cdot d)$ & $O(1)$ \\ -%\hline \hline -%\end{tabular} -%} -%\end{center} -%\end{table} \ No newline at end of file diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index 3b3683af235f46df36d8793e52c2b9c52e0defeb..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 32972de857b3c4f43170dcd3e7fbce76425f094d..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=False, - plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py deleted file mode 100644 index 0a163ce445c35d51a9d8940e46697c5c6a39d354..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - type='MaskScoringRCNN', - roi_head=dict( - type='MaskScoringRoIHead', - mask_iou_head=dict( - type='MaskIoUHead', - num_convs=4, - num_fcs=2, - roi_feat_size=14, - in_channels=256, - conv_out_channels=256, - fc_out_channels=1024, - num_classes=80)), - # model training and testing settings - train_cfg=dict(rcnn=dict(mask_thr_binary=0.5))) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r101_fpn_20e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r101_fpn_20e_coco.py deleted file mode 100644 index cef0668ad8f1b767db0dc8deeb688d67005af1e4..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r101_fpn_20e_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './scnet_r50_fpn_20e_coco.py' -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/evaluation/bbox_overlaps.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/evaluation/bbox_overlaps.py deleted file mode 100644 index 93559ea0f25369d552a5365312fa32b9ffec9226..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/evaluation/bbox_overlaps.py +++ /dev/null @@ -1,48 +0,0 @@ -import numpy as np - - -def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6): - """Calculate the ious between each bbox of bboxes1 and bboxes2. 
- - Args: - bboxes1(ndarray): shape (n, 4) - bboxes2(ndarray): shape (k, 4) - mode(str): iou (intersection over union) or iof (intersection - over foreground) - - Returns: - ious(ndarray): shape (n, k) - """ - - assert mode in ['iou', 'iof'] - - bboxes1 = bboxes1.astype(np.float32) - bboxes2 = bboxes2.astype(np.float32) - rows = bboxes1.shape[0] - cols = bboxes2.shape[0] - ious = np.zeros((rows, cols), dtype=np.float32) - if rows * cols == 0: - return ious - exchange = False - if bboxes1.shape[0] > bboxes2.shape[0]: - bboxes1, bboxes2 = bboxes2, bboxes1 - ious = np.zeros((cols, rows), dtype=np.float32) - exchange = True - area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1]) - area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1]) - for i in range(bboxes1.shape[0]): - x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) - y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) - x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) - y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) - overlap = np.maximum(x_end - x_start, 0) * np.maximum( - y_end - y_start, 0) - if mode == 'iou': - union = area1[i] + area2 - overlap - else: - union = area1[i] if not exchange else area2 - union = np.maximum(union, eps) - ious[i, :] = overlap / union - if exchange: - ious = ious.T - return ious diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/adversarial/discriminators/msstftd.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/adversarial/discriminators/msstftd.py deleted file mode 100644 index 81a9100961c7a89a39df2643b24268fb90bfeaa4..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/adversarial/discriminators/msstftd.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -import torchaudio -import torch -from torch import nn -from einops import rearrange - -from ...modules import NormConv2d -from .base import MultiDiscriminator, MultiDiscriminatorOutputType - - -def get_2d_padding(kernel_size: tp.Tuple[int, int], dilation: tp.Tuple[int, int] = (1, 1)): - return (((kernel_size[0] - 1) * dilation[0]) // 2, ((kernel_size[1] - 1) * dilation[1]) // 2) - - -class DiscriminatorSTFT(nn.Module): - """STFT sub-discriminator. - - Args: - filters (int): Number of filters in convolutions. - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - n_fft (int): Size of FFT for each scale. - hop_length (int): Length of hop between STFT windows for each scale. - kernel_size (tuple of int): Inner Conv2d kernel sizes. - stride (tuple of int): Inner Conv2d strides. - dilations (list of int): Inner Conv2d dilation on the time dimension. - win_length (int): Window size for each scale. - normalized (bool): Whether to normalize by magnitude after stft. - norm (str): Normalization method. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - growth (int): Growth factor for the filters. 
- """ - def __init__(self, filters: int, in_channels: int = 1, out_channels: int = 1, - n_fft: int = 1024, hop_length: int = 256, win_length: int = 1024, max_filters: int = 1024, - filters_scale: int = 1, kernel_size: tp.Tuple[int, int] = (3, 9), dilations: tp.List = [1, 2, 4], - stride: tp.Tuple[int, int] = (1, 2), normalized: bool = True, norm: str = 'weight_norm', - activation: str = 'LeakyReLU', activation_params: dict = {'negative_slope': 0.2}): - super().__init__() - assert len(kernel_size) == 2 - assert len(stride) == 2 - self.filters = filters - self.in_channels = in_channels - self.out_channels = out_channels - self.n_fft = n_fft - self.hop_length = hop_length - self.win_length = win_length - self.normalized = normalized - self.activation = getattr(torch.nn, activation)(**activation_params) - self.spec_transform = torchaudio.transforms.Spectrogram( - n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window_fn=torch.hann_window, - normalized=self.normalized, center=False, pad_mode=None, power=None) - spec_channels = 2 * self.in_channels - self.convs = nn.ModuleList() - self.convs.append( - NormConv2d(spec_channels, self.filters, kernel_size=kernel_size, padding=get_2d_padding(kernel_size)) - ) - in_chs = min(filters_scale * self.filters, max_filters) - for i, dilation in enumerate(dilations): - out_chs = min((filters_scale ** (i + 1)) * self.filters, max_filters) - self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, - dilation=(dilation, 1), padding=get_2d_padding(kernel_size, (dilation, 1)), - norm=norm)) - in_chs = out_chs - out_chs = min((filters_scale ** (len(dilations) + 1)) * self.filters, max_filters) - self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_size[0], kernel_size[0]), - padding=get_2d_padding((kernel_size[0], kernel_size[0])), - norm=norm)) - self.conv_post = NormConv2d(out_chs, self.out_channels, - kernel_size=(kernel_size[0], kernel_size[0]), - padding=get_2d_padding((kernel_size[0], kernel_size[0])), - norm=norm) - - def forward(self, x: torch.Tensor): - fmap = [] - z = self.spec_transform(x) # [B, 2, Freq, Frames, 2] - z = torch.cat([z.real, z.imag], dim=1) - z = rearrange(z, 'b c w t -> b c t w') - for i, layer in enumerate(self.convs): - z = layer(z) - z = self.activation(z) - fmap.append(z) - z = self.conv_post(z) - return z, fmap - - -class MultiScaleSTFTDiscriminator(MultiDiscriminator): - """Multi-Scale STFT (MS-STFT) discriminator. - - Args: - filters (int): Number of filters in convolutions. - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - sep_channels (bool): Separate channels to distinct samples for stereo support. - n_ffts (Sequence[int]): Size of FFT for each scale. - hop_lengths (Sequence[int]): Length of hop between STFT windows for each scale. - win_lengths (Sequence[int]): Window size for each scale. - **kwargs: Additional args for STFTDiscriminator. 
- """ - def __init__(self, filters: int, in_channels: int = 1, out_channels: int = 1, sep_channels: bool = False, - n_ffts: tp.List[int] = [1024, 2048, 512], hop_lengths: tp.List[int] = [256, 512, 128], - win_lengths: tp.List[int] = [1024, 2048, 512], **kwargs): - super().__init__() - assert len(n_ffts) == len(hop_lengths) == len(win_lengths) - self.sep_channels = sep_channels - self.discriminators = nn.ModuleList([ - DiscriminatorSTFT(filters, in_channels=in_channels, out_channels=out_channels, - n_fft=n_ffts[i], win_length=win_lengths[i], hop_length=hop_lengths[i], **kwargs) - for i in range(len(n_ffts)) - ]) - - @property - def num_discriminators(self): - return len(self.discriminators) - - def _separate_channels(self, x: torch.Tensor) -> torch.Tensor: - B, C, T = x.shape - return x.view(-1, 1, T) - - def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType: - logits = [] - fmaps = [] - for disc in self.discriminators: - logit, fmap = disc(x) - logits.append(logit) - fmaps.append(fmap) - return logits, fmaps diff --git a/spaces/HESOAYM/ElviraMulti/chatgpt - macOS.command b/spaces/HESOAYM/ElviraMulti/chatgpt - macOS.command deleted file mode 100644 index fa015edca9e6916f24394813ce8ba77d2072e296..0000000000000000000000000000000000000000 --- a/spaces/HESOAYM/ElviraMulti/chatgpt - macOS.command +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -echo Opening ChuanhuChatGPT... -cd "$(dirname "${BASH_SOURCE[0]}")" -nohup python3 ChuanhuChatbot.py >/dev/null 2>&1 & -sleep 5 -open http://127.0.0.1:7860 -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). If you kill ChuanhuChatbot, Use "pkill -f 'ChuanhuChatbot'" command in terminal. \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/ofa.py b/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/ofa.py deleted file mode 100644 index 1d852937ad260f3adbf0114dbdbc15599f47d565..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/ofa.py +++ /dev/null @@ -1,434 +0,0 @@ -# Copyright 2022 The OFA-Sys Team. -# All rights reserved. -# This source code is licensed under the Apache 2.0 license -# found in the LICENSE file in the root directory. 
- -""" -OFA -""" -from typing import Optional - -import logging - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.models import register_model, register_model_architecture -from fairseq.modules.transformer_sentence_encoder import init_bert_params - -from .unify_transformer import TransformerModel - -logger = logging.getLogger(__name__) - - -@register_model("ofa") -class OFAModel(TransformerModel): - __jit_unused_properties__ = ["supported_targets"] - - def __init__(self, args, encoder, decoder): - super().__init__(args, encoder, decoder) - - # We follow BERT's random weight initialization - self.apply(init_bert_params) - - self.classification_heads = nn.ModuleDict() - if hasattr(self.encoder, "dictionary"): - self.eos: int = self.encoder.dictionary.eos() - - @staticmethod - def add_args(parser): - super(OFAModel, OFAModel).add_args(parser) - parser.add_argument( - "--pooler-dropout", - type=float, - metavar="D", - help="dropout probability in the masked_lm pooler layers", - ) - parser.add_argument( - "--pooler-classifier", - type=str, - choices=['mlp', 'linear'], - help="type of pooler classifier", - ) - parser.add_argument( - "--pooler-activation-fn", - choices=utils.get_available_activation_fns(), - help="activation function to use for pooler layer", - ) - parser.add_argument( - "--spectral-norm-classification-head", - action="store_true", - help="Apply spectral normalization on the classification head", - ) - - @property - def supported_targets(self): - return {"self"} - - def forward( - self, - src_tokens, - src_lengths, - prev_output_tokens, - patch_images: Optional[torch.Tensor] = None, - patch_images_2: Optional[torch.Tensor] = None, - patch_masks: Optional[torch.Tensor] = None, - code_masks: Optional[torch.Tensor] = None, - sample_patch_num: Optional[int] = None, - features_only: bool = False, - classification_head_name: Optional[str] = None, - token_embeddings: Optional[torch.Tensor] = None, - return_all_hiddens: bool = False, - alignment_layer: Optional[int] = None, - alignment_heads: Optional[int] = None, - ): - if classification_head_name is not None: - features_only = True - - encoder_out = self.encoder( - src_tokens, - src_lengths=src_lengths, - patch_images=patch_images, - patch_masks=patch_masks, - patch_images_2=patch_images_2, - token_embeddings=token_embeddings, - return_all_hiddens=return_all_hiddens, - sample_patch_num=sample_patch_num - ) - x, extra = self.decoder( - prev_output_tokens, - code_masks=code_masks, - encoder_out=encoder_out, - features_only=features_only, - alignment_layer=alignment_layer, - alignment_heads=alignment_heads, - src_lengths=src_lengths, - return_all_hiddens=return_all_hiddens, - ) - - pad = self.encoder.padding_idx - if classification_head_name is not None: - prev_lengths = prev_output_tokens.ne(pad).sum(1) - gather_index = prev_lengths[:, None, None].expand(x.size(0), 1, x.size(2)) - 1 - sentence_representation = x.gather(1, gather_index).squeeze() - if self.classification_heads[classification_head_name].use_two_images: - hidden_size = sentence_representation.size(1) - sentence_representation = sentence_representation.view(-1, hidden_size * 2) - for k, head in self.classification_heads.items(): - # for torch script only supports iteration - if k == classification_head_name: - x = head(sentence_representation) - break - - return x, extra - - def register_embedding_tokens(self, ans2label_dict, src_dict, bpe): - """Register embedding tokens""" - logger.info("Registering embedding 
tokens") - self.ans_tensor_list = [] - for i in range(len(ans2label_dict)): - ans = src_dict[-len(ans2label_dict)+i] - ans = ans[5:-1].replace('_', ' ') - ans_tensor = src_dict.encode_line( - line=bpe.encode(' {}'.format(ans.lower())), - add_if_not_exist=False, - append_eos=False - ).long() - self.ans_tensor_list.append(ans_tensor) - - def register_classification_head( - self, name, num_classes=None, inner_dim=None, use_two_images=False, **kwargs - ): - """Register a classification head.""" - logger.info("Registering classification head: {0}".format(name)) - if name in self.classification_heads: - prev_num_classes = self.classification_heads[name].out_proj.out_features - prev_inner_dim = self.classification_heads[name].dense.out_features - if num_classes != prev_num_classes or inner_dim != prev_inner_dim: - logger.warning( - 're-registering head "{}" with num_classes {} (prev: {}) ' - "and inner_dim {} (prev: {})".format( - name, num_classes, prev_num_classes, inner_dim, prev_inner_dim - ) - ) - self.classification_heads[name] = OFAClassificationHead( - input_dim=self.args.encoder_embed_dim, - inner_dim=inner_dim or self.args.encoder_embed_dim, - num_classes=num_classes, - activation_fn=self.args.pooler_activation_fn, - pooler_dropout=self.args.pooler_dropout, - pooler_classifier=self.args.pooler_classifier, - use_two_images=use_two_images, - do_spectral_norm=getattr( - self.args, "spectral_norm_classification_head", False - ), - ) - - def upgrade_state_dict_named(self, state_dict, name): - super().upgrade_state_dict_named(state_dict, name) - - prefix = name + "." if name != "" else "" - current_head_names = ( - [] - if not hasattr(self, "classification_heads") - else self.classification_heads.keys() - ) - - # Handle new classification heads present in the state dict. - keys_to_delete = [] - for k in state_dict.keys(): - if not k.startswith(prefix + "classification_heads."): - continue - - head_name = k[len(prefix + "classification_heads.") :].split(".")[0] - num_classes = state_dict[ - prefix + "classification_heads." + head_name + ".out_proj.weight" - ].size(0) - inner_dim = state_dict[ - prefix + "classification_heads." + head_name + ".dense.weight" - ].size(0) - - if getattr(self.args, "load_checkpoint_heads", False): - if head_name not in current_head_names: - self.register_classification_head(head_name, num_classes, inner_dim) - else: - if head_name not in current_head_names: - logger.warning( - "deleting classification head ({}) from checkpoint " - "not present in current model: {}".format(head_name, k) - ) - keys_to_delete.append(k) - elif ( - num_classes - != self.classification_heads[head_name].out_proj.out_features - or inner_dim - != self.classification_heads[head_name].dense.out_features - ): - logger.warning( - "deleting classification head ({}) from checkpoint " - "with different dimensions than current model: {}".format( - head_name, k - ) - ) - keys_to_delete.append(k) - for k in keys_to_delete: - del state_dict[k] - - def truncate_emb(key): - if key in state_dict: - state_dict[key] = state_dict[key][:-1, :] - - # When finetuning on translation task, remove last row of - # embedding matrix that corresponds to mask_idx token. 
- loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0) - if ( - loaded_dict_size == len(self.encoder.dictionary) + 1 - and "<mask>" not in self.encoder.dictionary - ): - truncate_emb("encoder.embed_tokens.weight") - truncate_emb("decoder.embed_tokens.weight") - truncate_emb("encoder.output_projection.weight") - truncate_emb("decoder.output_projection.weight") - - if loaded_dict_size < len(self.encoder.dictionary): - num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size - embed_dim = state_dict["encoder.embed_tokens.weight"].size(1) - - new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim) - if getattr(self, "ans_tensor_list", None): - assert len(new_lang_embed_to_add) == len(self.ans_tensor_list) - for i, ans_tensor in enumerate(self.ans_tensor_list): - ans_embed = F.embedding(ans_tensor, state_dict["encoder.embed_tokens.weight"]) - ans_embed = ans_embed.sum(0) / ans_embed.size(0) - new_lang_embed_to_add[i] = ans_embed - else: - nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5) - new_lang_embed_to_add = new_lang_embed_to_add.to( - dtype=state_dict["encoder.embed_tokens.weight"].dtype, - ) - - state_dict["encoder.embed_tokens.weight"] = torch.cat( - [state_dict["encoder.embed_tokens.weight"], new_lang_embed_to_add] - ) - state_dict["decoder.embed_tokens.weight"] = torch.cat( - [state_dict["decoder.embed_tokens.weight"], new_lang_embed_to_add] - ) - state_dict["decoder.output_projection.weight"] = torch.cat( - [state_dict["decoder.output_projection.weight"], new_lang_embed_to_add] - ) - - # Copy any newly-added classification heads into the state dict - # with their current weights. - if hasattr(self, "classification_heads"): - cur_state = self.classification_heads.state_dict() - for k, v in cur_state.items(): - if prefix + "classification_heads." + k not in state_dict: - logger.info("Overwriting " + prefix + "classification_heads." + k) - state_dict[prefix + "classification_heads."
+ k] = v - - -class OFAClassificationHead(nn.Module): - """Head for sentence-level classification tasks.""" - - def __init__( - self, - input_dim, - inner_dim, - num_classes, - activation_fn, - pooler_dropout, - pooler_classifier, - use_two_images=False, - do_spectral_norm=False, - ): - super().__init__() - self.pooler_classifier = pooler_classifier - self.use_two_images = use_two_images - input_dim = input_dim * 2 if use_two_images else input_dim - if pooler_classifier == "mlp": - self.dense = nn.Linear(input_dim, inner_dim) - self.activation_fn = utils.get_activation_fn(activation_fn) - self.dropout = nn.Dropout(p=pooler_dropout) - self.out_proj = nn.Linear(inner_dim, num_classes) - elif pooler_classifier == "linear": - self.dropout = nn.Dropout(p=pooler_dropout) - self.out_proj = nn.Linear(input_dim, num_classes) - else: - raise NotImplementedError - - if do_spectral_norm: - self.out_proj = torch.nn.utils.spectral_norm(self.out_proj) - - def forward(self, features, **kwargs): - if self.pooler_classifier == 'mlp': - x = features - x = self.dropout(x) - x = self.dense(x) - x = self.activation_fn(x) - x = self.dropout(x) - x = self.out_proj(x) - elif self.pooler_classifier == 'linear': - x = features - x = self.dropout(x) - x = self.out_proj(x) - else: - raise NotImplementedError - return x - - -@register_model_architecture("ofa", "ofa_large") -def ofa_large_architecture(args): - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024) - args.encoder_layers = getattr(args, "encoder_layers", 12) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 12) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.relu_dropout = getattr(args, "relu_dropout", 0.0) - args.dropout = getattr(args, "dropout", 0.0) - args.max_target_positions = getattr(args, "max_target_positions", 1024) - args.max_source_positions = getattr(args, "max_source_positions", 1024) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", True - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", True) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - args.no_scale_embedding = getattr(args, "no_scale_embedding", True) - args.layernorm_embedding = getattr(args, "layernorm_embedding", True) - - args.activation_fn = getattr(args, "activation_fn", "gelu") - args.pooler_activation_fn = getattr(args, 
"pooler_activation_fn", "tanh") - args.pooler_dropout = getattr(args, "pooler_dropout", 0.0) - args.pooler_classifier = getattr(args, "pooler_classifier", "mlp") - - args.resnet_drop_path_rate = getattr(args, "resnet_drop_path_rate", 0.0) - args.encoder_drop_path_rate = getattr(args, "encoder_drop_path_rate", 0.0) - args.decoder_drop_path_rate = getattr(args, "decoder_drop_path_rate", 0.0) - - args.resnet_type = getattr(args, "resnet_type", "resnet152") - args.token_bucket_size = getattr(args, "token_bucket_size", 256) - args.image_bucket_size = getattr(args, "image_bucket_size", 42) - - args.freeze_encoder_embedding = getattr(args, "freeze_encoder_embedding", False) - args.freeze_decoder_embedding = getattr(args, "freeze_decoder_embedding", False) - args.add_type_embedding = getattr(args, "add_type_embedding", True) - args.attn_scale_factor = getattr(args, "attn_scale_factor", 2) - - args.code_image_size = getattr(args, "code_image_size", 128) - args.patch_layernorm_embedding = getattr(args, "patch_layernorm_embedding", True) - args.code_layernorm_embedding = getattr(args, "code_layernorm_embedding", True) - args.entangle_position_embedding = getattr(args, "entangle_position_embedding", False) - args.disable_entangle = getattr(args, "disable_entangle", False) - args.sync_bn = getattr(args, "sync_bn", False) - - args.scale_attn = getattr(args, "scale_attn", False) - args.scale_fc = getattr(args, "scale_fc", False) - args.scale_heads = getattr(args, "scale_heads", False) - args.scale_resids = getattr(args, "scale_resids", False) - - -@register_model_architecture("ofa", "ofa_base") -def ofa_base_architecture(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12) - args.resnet_type = getattr(args, "resnet_type", "resnet101") - ofa_large_architecture(args) - - -@register_model_architecture("ofa", "ofa_huge") -def ofa_huge_architecture(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1280) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1280) - args.encoder_layers = getattr(args, "encoder_layers", 24) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) - args.decoder_layers = getattr(args, "decoder_layers", 12) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) - args.resnet_type = getattr(args, "resnet_type", "resnet152") - ofa_large_architecture(args) - - -@register_model_architecture("ofa", "ofa_medium") -def ofa_medium_architecture(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 512) - args.encoder_layers = getattr(args, "encoder_layers", 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.decoder_layers = getattr(args, "decoder_layers", 4) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.resnet_type = getattr(args, "resnet_type", "resnet101") - ofa_large_architecture(args) - - -@register_model_architecture("ofa", "ofa_tiny") -def ofa_medium_architecture(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_ffn_embed_dim = 
getattr(args, "encoder_ffn_embed_dim", 4 * 256) - args.encoder_layers = getattr(args, "encoder_layers", 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.decoder_layers = getattr(args, "decoder_layers", 4) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) - args.resnet_type = getattr(args, "resnet_type", "resnet50") - ofa_large_architecture(args) diff --git a/spaces/HarshWK/Basic_Models/app.py b/spaces/HarshWK/Basic_Models/app.py deleted file mode 100644 index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000 --- a/spaces/HarshWK/Basic_Models/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/commons.py b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/commons.py deleted file mode 100644 index 8da7b35049d768a29de6f66cbe8795a825967818..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/glow_tts/commons.py +++ /dev/null @@ -1,273 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from librosa.filters import mel as librosa_mel_fn -from audio_processing import dynamic_range_compression -from audio_processing import dynamic_range_decompression -from stft import STFT - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def mle_loss(z, m, logs, logdet, mask): - l = torch.sum(logs) + 0.5 * torch.sum( - torch.exp(-2 * logs) * ((z - m) ** 2) - ) # neg normal likelihood w/o the constant term - l = l - torch.sum(logdet) # log jacobian determinant - l = l / torch.sum( - torch.ones_like(z) * mask - ) # averaging across batch, channel and time axes - l = l + 0.5 * math.log(2 * math.pi) # add the remaining constant term - return l - - -def duration_loss(logw, logw_, lengths): - l = torch.sum((logw - logw_) ** 2) / torch.sum(lengths) - return l - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def maximum_path(value, mask, max_neg_val=-np.inf): - """Numpy-friendly version. It's about 4 times faster than torch version. 
- value: [b, t_x, t_y] - mask: [b, t_x, t_y] - """ - value = value * mask - - device = value.device - dtype = value.dtype - value = value.cpu().detach().numpy() - mask = mask.cpu().detach().numpy().astype(np.bool) - - b, t_x, t_y = value.shape - direction = np.zeros(value.shape, dtype=np.int64) - v = np.zeros((b, t_x), dtype=np.float32) - x_range = np.arange(t_x, dtype=np.float32).reshape(1, -1) - for j in range(t_y): - v0 = np.pad(v, [[0, 0], [1, 0]], mode="constant", constant_values=max_neg_val)[ - :, :-1 - ] - v1 = v - max_mask = v1 >= v0 - v_max = np.where(max_mask, v1, v0) - direction[:, :, j] = max_mask - - index_mask = x_range <= j - v = np.where(index_mask, v_max + value[:, :, j], max_neg_val) - direction = np.where(mask, direction, 1) - - path = np.zeros(value.shape, dtype=np.float32) - index = mask[:, :, 0].sum(1).astype(np.int64) - 1 - index_range = np.arange(b) - for j in reversed(range(t_y)): - path[index_range, index, j] = 1 - index = index + direction[index_range, index, j] - 1 - path = path * mask.astype(np.float32) - path = torch.from_numpy(path).to(device=device, dtype=dtype) - return path - - -def generate_path(duration, mask): - """ - duration: [b, t_x] - mask: [b, t_x, t_y] - """ - device = duration.device - - b, t_x, t_y = mask.shape - cum_duration = torch.cumsum(duration, 1) - path = torch.zeros(b, t_x, t_y, dtype=mask.dtype).to(device=device) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path * mask - return path - - -class Adam: - def __init__( - self, - params, - scheduler, - dim_model, - warmup_steps=4000, - lr=1e0, - betas=(0.9, 0.98), - eps=1e-9, - ): - self.params = params - self.scheduler = scheduler - self.dim_model = dim_model - self.warmup_steps = warmup_steps - self.lr = lr - self.betas = betas - self.eps = eps - - self.step_num = 1 - self.cur_lr = lr * self._get_lr_scale() - - self._optim = torch.optim.Adam(params, lr=self.cur_lr, betas=betas, eps=eps) - - def _get_lr_scale(self): - if self.scheduler == "noam": - return np.power(self.dim_model, -0.5) * np.min( - [ - np.power(self.step_num, -0.5), - self.step_num * np.power(self.warmup_steps, -1.5), - ] - ) - else: - return 1 - - def _update_learning_rate(self): - self.step_num += 1 - if self.scheduler == "noam": - self.cur_lr = self.lr * self._get_lr_scale() - for param_group in self._optim.param_groups: - param_group["lr"] = self.cur_lr - - def get_lr(self): - return self.cur_lr - - def step(self): - self._optim.step() - self._update_learning_rate() - - def zero_grad(self): - self._optim.zero_grad() - - def load_state_dict(self, d): - self._optim.load_state_dict(d) - - def state_dict(self): - return self._optim.state_dict() - - -class TacotronSTFT(nn.Module): - def __init__( - self, - filter_length=1024, - hop_length=256, - win_length=1024, - n_mel_channels=80, - sampling_rate=22050, - mel_fmin=0.0, - mel_fmax=8000.0, - ): - super(TacotronSTFT, self).__init__() - self.n_mel_channels = n_mel_channels - self.sampling_rate = sampling_rate - self.stft_fn = STFT(filter_length, hop_length, win_length) - mel_basis = librosa_mel_fn( - sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax - ) - mel_basis = torch.from_numpy(mel_basis).float() - self.register_buffer("mel_basis", mel_basis) - - def spectral_normalize(self, magnitudes): - output = dynamic_range_compression(magnitudes) - return output - - def 
spectral_de_normalize(self, magnitudes): - output = dynamic_range_decompression(magnitudes) - return output - - def mel_spectrogram(self, y): - """Computes mel-spectrograms from a batch of waves - PARAMS - ------ - y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1] - - RETURNS - ------- - mel_output: torch.FloatTensor of shape (B, n_mel_channels, T) - """ - assert torch.min(y.data) >= -1 - assert torch.max(y.data) <= 1 - - magnitudes, phases = self.stft_fn.transform(y) - magnitudes = magnitudes.data - mel_output = torch.matmul(self.mel_basis, magnitudes) - mel_output = self.spectral_normalize(mel_output) - return mel_output - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm - - -def squeeze(x, x_mask=None, n_sqz=2): - b, c, t = x.size() - - t = (t // n_sqz) * n_sqz - x = x[:, :, :t] - x_sqz = x.view(b, c, t // n_sqz, n_sqz) - x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz) - - if x_mask is not None: - x_mask = x_mask[:, :, n_sqz - 1 :: n_sqz] - else: - x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype) - return x_sqz * x_mask, x_mask - - -def unsqueeze(x, x_mask=None, n_sqz=2): - b, c, t = x.size() - - x_unsqz = x.view(b, n_sqz, c // n_sqz, t) - x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz) - - if x_mask is not None: - x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz) - else: - x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype) - return x_unsqz * x_mask, x_mask diff --git a/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/Utils.py b/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/Utils.py deleted file mode 100644 index a7aff300380f793c941953e0d63ddf6d71281592..0000000000000000000000000000000000000000 --- a/spaces/Heshwa/html-code-generation-from-images-with-deep-neural-networks/classes/Utils.py +++ /dev/null @@ -1,39 +0,0 @@ -__author__ = 'Taneem Jan, taneemishere.github.io' - -import numpy as np - - -class Utils: - @staticmethod - def sparsify(label_vector, output_size): - sparse_vector = [] - - for label in label_vector: - sparse_label = np.zeros(output_size) - sparse_label[label] = 1 - - sparse_vector.append(sparse_label) - - return np.array(sparse_vector) - - @staticmethod - def get_preprocessed_img(img_path, image_size): - import cv2 - # from keras.preprocessing.image import array_to_img, img_to_array - # img = array_to_img(img_path) - # img = img_to_array(img) - # img = cv2.imread(img_path) - # don't need to read the image as we're now directly passing the - # image as numpy array to this method - img = cv2.resize(img_path, (image_size, image_size)) - img = img.astype('float32') - img /= 255 - return img - - @staticmethod - def show(image): - import cv2 - cv2.namedWindow("view", cv2.WINDOW_AUTOSIZE) - cv2.imshow("view", image) - cv2.waitKey(0) - cv2.destroyWindow("view") diff --git a/spaces/Hexamind/GDOC/src/domain/paragraph.py b/spaces/Hexamind/GDOC/src/domain/paragraph.py deleted file mode 100644 index 
051b53eecdff7caee9515cd5399d4e7f47085bb9..0000000000000000000000000000000000000000 --- a/spaces/Hexamind/GDOC/src/domain/paragraph.py +++ /dev/null @@ -1,149 +0,0 @@ -import string -from src.tools.doc_tools import get_positions, convert_to_png -from docx.enum.text import WD_ALIGN_PARAGRAPH -import xml.etree.ElementTree as ET -from docx.oxml.ns import qn -import zipfile -import os -import re - - -INFINITE = 10000 - -class Paragraph: - - def __init__(self, xparagraph, doc_id: int, id_: int): - - self.xparagraph = xparagraph - self.id_ = int(str(2) + str(doc_id) + str(id_)) - style_name = self.xparagraph.style.name - self.level = self.get_level_from_name(style_name) - self.is_structure = self.level < INFINITE - self.text = self.xparagraph.text - self.type, self.parsed_text = self.parse_text() - - - @property - def structure(self): - structure = {str(self.id_): { - 'index': str(self.id_), - 'canMove': True, - 'isFolder': False, - 'children': [], - 'title': self.text, - 'canRename': True, - 'data': {}, - 'level': self.level, - }} - return structure - - @property - def blank(self): - """ - checks if the paragraph is blank: i.e. it brings some signal (it may otherwise be ignored) - """ - text = self.text.replace('\n', '') - return set(text).isdisjoint(string.ascii_letters) - - @staticmethod - def get_level_from_name(style_name: str) -> int: - level = INFINITE - if 'Titre' in style_name or 'Heading' in style_name: - suffix = style_name[-1] - try: - level = int(suffix) - except: - pass - return level - - def parse_text(self) -> (str, str): - - if self.is_structure: - return 'structure', self.text - - startswith = {"?? ": "task", "++ ": "comment"} - for start in startswith.keys(): - split = self.text.rsplit(start) - if 1 < len(split): - return startswith[start], split[1] - - return "normal", self.text - - def set_text(self, text: str): - self.text = text - self.xparagraph.text = text - return self - - def contains_image(self) -> bool: - return any("pic:pic" in run.element.xml for run in self.xparagraph.runs) - # is_image = False - # for run in self.xparagraph.runs: - # if "pic:pic" in run.element.xml: - # xml = run.element.xml - # print(run.element.xml) - # #find the anchor element - # print(xml) - # root = ET.fromstring(xml) - # anch = ET.SubElement(root, "wp:anchor") - # item = ET.SubElement(anch, "wp:positionH") - # item2 = ET.SubElement(anch, "wp:positionV") - # # find the anchor element - # attri = root.findall(".//{http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing}anchor") - # # create a child to the positionH and positionV elements - # if attri: - # #print all the children of the anchor element - # for anchors in attri: - # childH = ET.SubElement(anchors, "wp:positionH") - # childV = ET.SubElement(anchors, "wp:positionV") - # ET.SubElement(childH, "wp:align").text = "center" - # ET.SubElement(childV, "wp:align").text = "center" - # xml = ET.tostring(root, encoding='unicode', method='xml') - # # add a child to the positionH and positionV using xml variable - # ET.SubElement(item, "wp:align").text = "center" - # ET.SubElement(item2, "wp:align").text = "center" - # print(ET.tostring(root)) - # else: - # is_image = True - # return is_image - - - - - - - def center_paragraph(self): - if self.contains_image(): - self.xparagraph.alignment = WD_ALIGN_PARAGRAPH.CENTER - - def justify_paragraph(self): - if(self.xparagraph.style.name == "Normal"): - self.xparagraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY - - # def insert_paragraphs(self,images,template_doc): - # empty_paragraph = 
Paragraph(template_doc.xdoc.add_paragraph(""),template_doc.id_,template_doc.container.paragraphs[-1].id_+1) - # template_doc.add_paragraph(empty_paragraph) - # template_xp = template_doc.xdoc.paragraphs[-1] - # for run in self.xparagraph.runs: - # new_run = template_xp.add_run(run.text) - # if "pic:pic" in run.element.xml: - # xml = run.element.xml - # print(xml) - # #check if there is the same image multiple times in the document - # image_name = xml.split("pic:pic")[1].split('name="')[1].split('"')[0] - # image_name = re.sub('[\s+]', '', image_name) - # image_to_put = image_name.lower() + '.png' - # #loop over all the cx and cy occurences and stop when both strings in between are numbers - # width,height = get_positions(xml) - # index_to_use = images.index("word/media/" + image_to_put) - # new_run.add_picture(images[index_to_use], width=width, height=height) - # # os.remove(images[0]) - # # return images - - def get_styles_in_paragraph(self): - styles = [self.xparagraph.style.name] - for run in self.xparagraph.runs: - if run.style.name != "Default Paragraph Font": - styles.append(run.style.name) - return styles - - diff --git a/spaces/HighCWu/GPEN/face_enhancement.py b/spaces/HighCWu/GPEN/face_enhancement.py deleted file mode 100644 index 7782abbe744bda55f27864abb931e0b3adb23dc3..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/GPEN/face_enhancement.py +++ /dev/null @@ -1,111 +0,0 @@ -''' -@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021) -@author: yangxy (yangtao9009@gmail.com) -''' -import os -import cv2 -import glob -import time -import numpy as np -from PIL import Image -import __init_paths -from retinaface.retinaface_detection import RetinaFaceDetection -from face_model.face_gan import FaceGAN -from sr_model.real_esrnet import RealESRNet -from align_faces import warp_and_crop_face, get_reference_facial_points - -class FaceEnhancement(object): - def __init__(self, base_dir='./', size=512, out_size=None, model=None, channel_multiplier=2, narrow=1, key=None, device='cpu', u=False): - self.facedetector = RetinaFaceDetection(base_dir, device) - self.facegan = FaceGAN(base_dir, size, out_size, model, channel_multiplier, narrow, key, device=device) - self.srmodel = RealESRNet(base_dir, 'realesrnet', 2, 0, device=device) - self.use_sr = u - self.size = size - self.out_size = size if out_size==None else out_size - self.threshold = 0.9 - - # the mask for pasting restored faces back - self.mask = np.zeros((512, 512), np.float32) - cv2.rectangle(self.mask, (26, 26), (486, 486), (1, 1, 1), -1, cv2.LINE_AA) - self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11) - self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11) - - self.kernel = np.array(( - [0.0625, 0.125, 0.0625], - [0.125, 0.25, 0.125], - [0.0625, 0.125, 0.0625]), dtype="float32") - - # get the reference 5 landmarks position in the crop settings - default_square = True - inner_padding_factor = 0.25 - outer_padding = (0, 0) - self.reference_5pts = get_reference_facial_points( - (self.size, self.size), inner_padding_factor, outer_padding, default_square) - - def mask_postprocess(self, mask, thres=20): - mask[:thres, :] = 0; mask[-thres:, :] = 0 - mask[:, :thres] = 0; mask[:, -thres:] = 0 - mask = cv2.GaussianBlur(mask, (101, 101), 11) - mask = cv2.GaussianBlur(mask, (101, 101), 11) - return mask.astype(np.float32) - - def process(self, img, aligned=False): - orig_faces, enhanced_faces = [], [] - if aligned: - ef = self.facegan.process(img) - orig_faces.append(img) - 
enhanced_faces.append(ef) - - if self.use_sr: - ef = self.srmodel.process(ef) - - return ef, orig_faces, enhanced_faces - - if self.use_sr: - img_sr = self.srmodel.process(img) - if img_sr is not None: - img = cv2.resize(img, img_sr.shape[:2][::-1]) - - facebs, landms = self.facedetector.detect(img) - - height, width = img.shape[:2] - full_mask = np.zeros((height, width), dtype=np.float32) - full_img = np.zeros(img.shape, dtype=np.uint8) - - for i, (faceb, facial5points) in enumerate(zip(facebs, landms)): - if faceb[4]<self.threshold: continue - fb = faceb[:4].astype(int) - - facial5points = np.reshape(facial5points, (2, 5)) - - of, tfm_inv = warp_and_crop_face(img, facial5points, reference_pts=self.reference_5pts, crop_size=(self.size, self.size)) - - # enhance the face - ef = self.facegan.process(of) - - orig_faces.append(of) - enhanced_faces.append(ef) - - tmp_mask = self.mask - tmp_mask = cv2.resize(tmp_mask, ef.shape[:2]) - tmp_mask = cv2.warpAffine(tmp_mask, tfm_inv, (width, height), flags=3) - - if min(fb[2]-fb[0], fb[3]-fb[1])<100: # gaussian filter for small faces - ef = cv2.filter2D(ef, -1, self.kernel) - - ef = cv2.resize(ef, (self.size, self.size)) - tmp_img = cv2.warpAffine(ef, tfm_inv, (width, height), flags=3) - - mask = tmp_mask - full_mask - full_mask[np.where(mask>0)] = tmp_mask[np.where(mask>0)] - full_img[np.where(mask>0)] = tmp_img[np.where(mask>0)] - - full_mask = full_mask[:, :, np.newaxis] - if self.use_sr and img_sr is not None: - img = cv2.convertScaleAbs(img_sr*(1-full_mask) + full_img*full_mask) - else: - img = cv2.convertScaleAbs(img*(1-full_mask) + full_img*full_mask) - - return img, orig_faces, enhanced_faces - diff --git a/spaces/HighCWu/GPEN/face_inpainting.py b/spaces/HighCWu/GPEN/face_inpainting.py deleted file mode 100644 index 951fc3c2ec5517f3a31bd9a301573e8d3c9bb9a3..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/GPEN/face_inpainting.py +++ /dev/null @@ -1,18 +0,0 @@ -''' -@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021) -@author: yangxy (yangtao9009@gmail.com) -''' -from face_model.face_gan import FaceGAN - -class FaceInpainting(object): - def __init__(self, base_dir='./', size=1024, out_size=1024, model=None, channel_multiplier=2, narrow=1, key=None, device='cuda'): - self.facegan = FaceGAN(base_dir, size, out_size, model, channel_multiplier, narrow, key, device=device) - - # make sure the face image is well aligned. Please refer to face_enhancement.py - def process(self, brokenf, aligned=True): - # complete the face - out = self.facegan.process(brokenf) - - return out - - \ No newline at end of file diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/ModifyUpload.2cfe71e4.js b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/ModifyUpload.2cfe71e4.js deleted file mode 100644 index f7df3611d20ed37b209443e3203988be68146707..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/ModifyUpload.2cfe71e4.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as b,i as w,s as k,e as x,c as y,b as s,f as _,g as m,m as I,l as z,j as d,k as h,n as v,o as C,K as M,w as g,Y as u,x as f,a as E,d as p,D as L,E as B,F as D}from"./index.396f4a72.js";function S(o){let e,l,t,r,n,a;return t=new o[0]({}),{c(){e=x("button"),l=x("div"),y(t.$$.fragment),s(l,"class","m-t-1 w-[60%] h-[60%] opacity-80 dark:text-white"),s(e,"class","text-gray-500 bg-white/90 h-5 w-5 flex items-center justify-center rounded shadow-sm hover:shadow-xl hover:ring-1 ring-inset ring-gray-200 z-10 dark:bg-gray-900 dark:ring-gray-600"),s(e,"aria-label",o[1])},m(i,c){_(i,e,c),m(e,l),I(t,l,null),r=!0,n||(a=z(e,"click",o[2]),n=!0)},p(i,[c]){(!r||c&2)&&s(e,"aria-label",i[1])},i(i){r||(d(t.$$.fragment,i),r=!0)},o(i){h(t.$$.fragment,i),r=!1},d(i){i&&v(e),C(t),n=!1,a()}}}function q(o,e,l){let{Icon:t}=e,{label:r=""}=e;function n(a){M.call(this,o,a)}return o.$$set=a=>{"Icon"in a&&l(0,t=a.Icon),"label"in a&&l(1,r=a.label)},[t,r,n]}class j extends b{constructor(e){super(),w(this,e,q,S,k,{Icon:0,label:1})}}function F(o){let
e,l,t,r;return{c(){e=g("svg"),l=g("g"),t=g("path"),r=g("path"),s(t,"d","M18,6L6.087,17.913"),u(t,"fill","none"),u(t,"fill-rule","nonzero"),u(t,"stroke-width","2px"),s(l,"transform","matrix(1.14096,-0.140958,-0.140958,1.14096,-0.0559523,0.0559523)"),s(r,"d","M4.364,4.364L19.636,19.636"),u(r,"fill","none"),u(r,"fill-rule","nonzero"),u(r,"stroke-width","2px"),s(e,"width","100%"),s(e,"height","100%"),s(e,"viewBox","0 0 24 24"),s(e,"version","1.1"),s(e,"xmlns","http://www.w3.org/2000/svg"),s(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),s(e,"xml:space","preserve"),s(e,"stroke","currentColor"),u(e,"fill-rule","evenodd"),u(e,"clip-rule","evenodd"),u(e,"stroke-linecap","round"),u(e,"stroke-linejoin","round")},m(n,a){_(n,e,a),m(e,l),m(l,t),m(e,r)},p:f,i:f,o:f,d(n){n&&v(e)}}}class K extends b{constructor(e){super(),w(this,e,null,F,k,{})}}function P(o){let e,l;return{c(){e=g("svg"),l=g("path"),s(l,"d","M17 3a2.828 2.828 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5L17 3z"),s(e,"xmlns","http://www.w3.org/2000/svg"),s(e,"width","100%"),s(e,"height","100%"),s(e,"viewBox","0 0 24 24"),s(e,"fill","none"),s(e,"stroke","currentColor"),s(e,"stroke-width","1.5"),s(e,"stroke-linecap","round"),s(e,"stroke-linejoin","round"),s(e,"class","feather feather-edit-2")},m(t,r){_(t,e,r),m(e,l)},p:f,i:f,o:f,d(t){t&&v(e)}}}class U extends b{constructor(e){super(),w(this,e,null,P,k,{})}}function $(o){let e,l;return e=new j({props:{Icon:U,label:"Edit"}}),e.$on("click",o[3]),{c(){y(e.$$.fragment)},m(t,r){I(e,t,r),l=!0},p:f,i(t){l||(d(e.$$.fragment,t),l=!0)},o(t){h(e.$$.fragment,t),l=!1},d(t){C(e,t)}}}function Y(o){let e,l,t,r,n=o[0]&&$(o);return t=new j({props:{Icon:K,label:"Clear"}}),t.$on("click",o[4]),{c(){e=x("div"),n&&n.c(),l=E(),y(t.$$.fragment),s(e,"class","modify-upload z-10 top-2 right-2 justify-end flex gap-1"),p(e,"absolute",o[1]),p(e,"m-1",!o[1])},m(a,i){_(a,e,i),n&&n.m(e,null),m(e,l),I(t,e,null),r=!0},p(a,[i]){a[0]?n?(n.p(a,i),i&1&&d(n,1)):(n=$(a),n.c(),d(n,1),n.m(e,l)):n&&(L(),h(n,1,1,()=>{n=null}),B()),i&2&&p(e,"absolute",a[1]),i&2&&p(e,"m-1",!a[1])},i(a){r||(d(n),d(t.$$.fragment,a),r=!0)},o(a){h(n),h(t.$$.fragment,a),r=!1},d(a){a&&v(e),n&&n.d(),C(t)}}}function A(o,e,l){let{editable:t=!1}=e,{absolute:r=!0}=e;const n=D(),a=()=>n("edit"),i=c=>{n("clear"),c.stopPropagation()};return o.$$set=c=>{"editable"in c&&l(0,t=c.editable),"absolute"in c&&l(1,r=c.absolute)},[t,r,n,a,i]}class H extends b{constructor(e){super(),w(this,e,A,Y,k,{editable:0,absolute:1})}}export{K as C,j as I,H as M}; -//# sourceMappingURL=ModifyUpload.2cfe71e4.js.map diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/visualizers/__init__.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/visualizers/__init__.py deleted file mode 100644 index 4770d1f15a6790ab9606c7b9881f798c8e2d9545..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/training/visualizers/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -import logging - -from saicinpainting.training.visualizers.directory import DirectoryVisualizer -from saicinpainting.training.visualizers.noop import NoopVisualizer - - -def make_visualizer(kind, **kwargs): - logging.info(f'Make visualizer {kind}') - - if kind == 'directory': - return DirectoryVisualizer(**kwargs) - if kind == 'noop': - return NoopVisualizer() - - raise ValueError(f'Unknown visualizer kind {kind}') diff --git a/spaces/JMalott/ai_architecture/dalle/models/__init__.py b/spaces/JMalott/ai_architecture/dalle/models/__init__.py 
deleted file mode 100644 index a1c05af82c371e6d42b93a70c983205e262d151d..0000000000000000000000000000000000000000 --- a/spaces/JMalott/ai_architecture/dalle/models/__init__.py +++ /dev/null @@ -1,206 +0,0 @@ -# ------------------------------------------------------------------------------------ -# minDALL-E -# Copyright (c) 2021 Kakao Brain Corp. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------ - -import os -import torch -import torch.nn as nn -import pytorch_lightning as pl -from typing import Optional, Tuple -from omegaconf import OmegaConf -from torch.cuda.amp import autocast -from torch.optim.lr_scheduler import CosineAnnealingLR -from torch.nn import functional as F -from .stage1.vqgan import VQGAN -from .stage2.transformer import Transformer1d, iGPT -from .. import utils -from ..utils.config import get_base_config -from ..utils.sampling import sampling, sampling_igpt -from .tokenizer import build_tokenizer - -_MODELS = { - 'minDALL-E/1.3B': 'https://arena.kakaocdn.net/brainrepo/models/minDALL-E/57b008f02ceaa02b779c8b7463143315/1.3B.tar.gz' -} - - -class Dalle(nn.Module): - def __init__(self, - config: OmegaConf) -> None: - super().__init__() - self.tokenizer = None - self.stage1 = VQGAN(n_embed=config.stage1.n_embed, - embed_dim=config.stage1.embed_dim, - hparams=config.stage1.hparams) - self.stage2 = Transformer1d(vocab_size_txt=config.stage2.vocab_size_txt, - vocab_size_img=config.stage2.vocab_size_img, - hparams=config.stage2.hparams) - self.config_stage1 = config.stage1 - self.config_stage2 = config.stage2 - self.config_dataset = config.dataset - - @classmethod - def from_pretrained(cls, - path: str) -> nn.Module: - #path = _MODELS[path] if path in _MODELS else path - #path = utils.realpath_url_or_path(path, root=os.path.expanduser(".cache/minDALL-E")) - path = '' - - config_base = get_base_config() - config_new = OmegaConf.load(os.path.join(path, '.cache/minDALL-E/1.3B/config.yaml')) - config_update = OmegaConf.merge(config_base, config_new) - - model = cls(config_update) - model.tokenizer = build_tokenizer('.cache/minDALL-E/1.3B/tokenizer', - context_length=model.config_dataset.context_length, - lowercase=True, - dropout=None) - model.stage1.from_ckpt('.cache/minDALL-E/1.3B/stage1_last.ckpt') - model.stage2.from_ckpt('.cache/minDALL-E/1.3B/stage2_last.ckpt') - #model.stage1.from_ckpt('https://utexas.box.com/shared/static/rpt9miyj2kikogyekpqnkd6y115xp51i.ckpt') - #model.stage2.from_ckpt('https://utexas.box.com/shared/static/54jc9fw0bious5nx6wvayeqaskcrdgv4.ckpt') - - return model - - @torch.no_grad() - def sampling(self, - prompt: str, - top_k: int = 256, - top_p: Optional[float] = None, - softmax_temperature: float = 1.0, - num_candidates: int = 96, - device: str = 'cuda:0', - use_fp16: bool = True) -> torch.FloatTensor: - self.stage1.eval() - self.stage2.eval() - - tokens = self.tokenizer.encode(prompt) - tokens = torch.LongTensor(tokens.ids) - tokens = torch.repeat_interleave(tokens.unsqueeze(0), num_candidates, dim=0) - - # Check if the encoding works as intended - # print(self.tokenizer.decode_batch(tokens.tolist(), skip_special_tokens=True)[0]) - - tokens = tokens.to(device) - codes = sampling(self.stage2, - tokens, - top_k=top_k, - top_p=top_p, - softmax_temperature=softmax_temperature, - use_fp16=use_fp16) - codes = codes.view(num_candidates, 16, 16) # [B, 16, 16] - pixels = torch.clamp(self.stage1.decode_code(codes) * 0.5 + 0.5, 0, 1) # 
[B, 256, 256] - return pixels - - -class ImageGPT(pl.LightningModule): - def __init__(self, - config: OmegaConf) -> None: - super().__init__() - self.stage1 = VQGAN(n_embed=config.stage1.n_embed, - embed_dim=config.stage1.embed_dim, - hparams=config.stage1.hparams) - self.stage2 = iGPT(vocab_size_img=config.stage2.vocab_size_img, - use_cls_cond=config.stage2.use_cls_cond, - hparams=config.stage2.hparams) - self.config = config - self.use_cls_cond = config.stage2.use_cls_cond - - # make the parameters in stage 1 not trainable - self.stage1.eval() - for p in self.stage1.parameters(): - p.requires_grad = False - - @classmethod - def from_pretrained(cls, - path_upstream: str, - path_downstream: str) -> Tuple[nn.Module, OmegaConf]: - config_base = get_base_config(use_default=False) - config_down = OmegaConf.load(path_downstream) - config_down = OmegaConf.merge(config_base, config_down) - - model = cls(config_down) - model.stage1.from_ckpt(os.path.join(path_upstream, 'stage1_last.ckpt'), strict=True) - model.stage2.from_ckpt(os.path.join(path_upstream, 'stage2_last.ckpt'), strict=False) - return model, config_down - - def sample(self, - cls_idx: Optional[int] = None, - top_k: int = 256, - top_p: Optional[float] = None, - softmax_temperature: float = 1.0, - num_candidates: int = 16, - device: str = 'cuda:0', - use_fp16: bool = True, - is_tqdm: bool = True) -> torch.FloatTensor: - self.stage1.eval() - self.stage2.eval() - - if cls_idx is None: - sos = self.stage2.sos.repeat(num_candidates, 1, 1) - else: - sos = torch.LongTensor([cls_idx]).to(device=device) - sos = sos.repeat(num_candidates) - sos = self.stage2.sos(sos).unsqueeze(1) - - codes = sampling_igpt(self.stage2, - sos=sos, - top_k=top_k, - top_p=top_p, - softmax_temperature=softmax_temperature, - use_fp16=use_fp16, - is_tqdm=is_tqdm) - codes = codes.view(num_candidates, 16, 16) # [B, 16, 16] - pixels = torch.clamp(self.stage1.decode_code(codes) * 0.5 + 0.5, 0, 1) # [B, 256, 256] - return pixels - - def forward(self, - images: torch.FloatTensor, - labels: Optional[torch.LongTensor] = None) -> torch.FloatTensor: - B, C, H, W = images.shape - with torch.no_grad(): - with autocast(enabled=False): - codes = self.stage1.get_codes(images).detach() - logits = self.stage2(codes, labels) - return logits, codes - - def training_step(self, batch, batch_idx): - images, labels = batch - logits, codes = self(images, labels=labels if self.use_cls_cond else None) - loss = F.cross_entropy(logits.view(-1, logits.shape[-1]), codes.view(-1)) - self.log("train/loss", loss, on_step=True, on_epoch=True, prog_bar=False, logger=True) - return loss - - def validation_step(self, batch, batch_idx): - images, labels = batch - logits, codes = self(images, labels=labels if self.use_cls_cond else None) - loss = F.cross_entropy(logits.view(-1, logits.shape[-1]), codes.view(-1)) - self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=False, logger=True) - return loss - - def configure_optimizers(self): - assert self.config.optimizer.opt_type == 'adamW' - assert self.config.optimizer.sched_type == 'cosine' - - opt = torch.optim.AdamW(self.parameters(), - lr=self.config.optimizer.base_lr, - betas=self.config.optimizer.betas, - weight_decay=self.config.optimizer.weight_decay) - sched = CosineAnnealingLR(opt, - T_max=self.config.optimizer.max_steps, - eta_min=self.config.optimizer.min_lr) - sched = { - 'scheduler': sched, - 'name': 'cosine' - } - return [opt], [sched] - - def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, - 
on_tpu=False, using_native_amp=False, using_lbfgs=False): - optimizer.step(closure=optimizer_closure) - self.lr_schedulers().step() - self.log("lr", self.lr_schedulers().get_last_lr()[0], on_step=True, on_epoch=False, prog_bar=True, logger=True) - - def on_epoch_start(self): - self.stage1.eval() \ No newline at end of file diff --git a/spaces/Jamel887/Rv-percobaan887/lib/infer_pack/commons.py b/spaces/Jamel887/Rv-percobaan887/lib/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/Jamel887/Rv-percobaan887/lib/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, 
min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/app.py b/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/app.py deleted file mode 100644 index dc09e7c5f485b7e7a6b916d542c693a87286281f..0000000000000000000000000000000000000000 --- a/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/app.py +++ /dev/null @@ -1,879 +0,0 @@ -# code from @nyanko7 - -import random -import tempfile -import time -import gradio as gr -import numpy as np -import torch -import math -import re - -from gradio import inputs -from diffusers import ( - AutoencoderKL, - DDIMScheduler, - UNet2DConditionModel, -) -from modules.model import ( - CrossAttnProcessor, - StableDiffusionPipeline, -) -from torchvision import transforms -from transformers import CLIPTokenizer, CLIPTextModel -from PIL import Image -from pathlib import Path -from safetensors.torch import load_file -import modules.safe as _ -from modules.lora import LoRANetwork - - - -models = [ - - ("LSPV2-anime", "Jeffsun/LSPV2", 2), - ("LSPV3-real", "Jeffsun/LSPV3", 2) -] - -keep_vram = ["Jeffsun/LSPV2","Jeffsun/LSPV3"] -base_name, base_model, clip_skip = models[0] - -samplers_k_diffusion = [ - ("Euler a", "sample_euler_ancestral", {}), - ("Euler", "sample_euler", {}), - ("LMS", "sample_lms", {}), - ("Heun", "sample_heun", {}), - ("DPM2", "sample_dpm_2", {"discard_next_to_last_sigma": True}), - ("DPM2 a", "sample_dpm_2_ancestral", {"discard_next_to_last_sigma": True}), - ("DPM++ 2S a", "sample_dpmpp_2s_ancestral", {}), - ("DPM++ 2M", 
"sample_dpmpp_2m", {}), - ("DPM++ SDE", "sample_dpmpp_sde", {}), - ("LMS Karras", "sample_lms", {"scheduler": "karras"}), - ("DPM2 Karras", "sample_dpm_2", {"scheduler": "karras", "discard_next_to_last_sigma": True}), - ("DPM2 a Karras", "sample_dpm_2_ancestral", {"scheduler": "karras", "discard_next_to_last_sigma": True}), - ("DPM++ 2S a Karras", "sample_dpmpp_2s_ancestral", {"scheduler": "karras"}), - ("DPM++ 2M Karras", "sample_dpmpp_2m", {"scheduler": "karras"}), - ("DPM++ SDE Karras", "sample_dpmpp_sde", {"scheduler": "karras"}), -] - -# samplers_diffusers = [ -# ("DDIMScheduler", "diffusers.schedulers.DDIMScheduler", {}) -# ("DDPMScheduler", "diffusers.schedulers.DDPMScheduler", {}) -# ("DEISMultistepScheduler", "diffusers.schedulers.DEISMultistepScheduler", {}) -# ] - -start_time = time.time() -timeout = 90 - -scheduler = DDIMScheduler.from_pretrained( - base_model, - subfolder="scheduler", -) -vae = AutoencoderKL.from_pretrained( - "stabilityai/sd-vae-ft-ema", - torch_dtype=torch.float16 -) -text_encoder = CLIPTextModel.from_pretrained( - base_model, - subfolder="text_encoder", - torch_dtype=torch.float16, -) -tokenizer = CLIPTokenizer.from_pretrained( - base_model, - subfolder="tokenizer", - torch_dtype=torch.float16, -) -unet = UNet2DConditionModel.from_pretrained( - base_model, - subfolder="unet", - torch_dtype=torch.float16, -) -pipe = StableDiffusionPipeline( - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - vae=vae, - scheduler=scheduler, -) - -unet.set_attn_processor(CrossAttnProcessor) -pipe.setup_text_encoder(clip_skip, text_encoder) -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - -def get_model_list(): - return models - -te_cache = { - base_model: text_encoder -} - -unet_cache = { - base_model: unet -} - -lora_cache = { - base_model: LoRANetwork(text_encoder, unet) -} - -te_base_weight_length = text_encoder.get_input_embeddings().weight.data.shape[0] -original_prepare_for_tokenization = tokenizer.prepare_for_tokenization -current_model = base_model - -def setup_model(name, lora_state=None, lora_scale=1.0): - global pipe, current_model - - keys = [k[0] for k in models] - model = models[keys.index(name)][1] - if model not in unet_cache: - unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet", torch_dtype=torch.float16) - text_encoder = CLIPTextModel.from_pretrained(model, subfolder="text_encoder", torch_dtype=torch.float16) - - unet_cache[model] = unet - te_cache[model] = text_encoder - lora_cache[model] = LoRANetwork(text_encoder, unet) - - if current_model != model: - if current_model not in keep_vram: - # offload current model - unet_cache[current_model].to("cpu") - te_cache[current_model].to("cpu") - lora_cache[current_model].to("cpu") - current_model = model - - local_te, local_unet, local_lora, = te_cache[model], unet_cache[model], lora_cache[model] - local_unet.set_attn_processor(CrossAttnProcessor()) - local_lora.reset() - clip_skip = models[keys.index(name)][2] - - if torch.cuda.is_available(): - local_unet.to("cuda") - local_te.to("cuda") - - if lora_state is not None and lora_state != "": - local_lora.load(lora_state, lora_scale) - local_lora.to(local_unet.device, dtype=local_unet.dtype) - - pipe.text_encoder, pipe.unet = local_te, local_unet - pipe.setup_unet(local_unet) - pipe.tokenizer.prepare_for_tokenization = original_prepare_for_tokenization - pipe.tokenizer.added_tokens_encoder = {} - pipe.tokenizer.added_tokens_decoder = {} - pipe.setup_text_encoder(clip_skip, local_te) - return pipe - - -def error_str(error, 
title="Error"): - return ( - f"""#### {title} - {error}""" - if error - else "" - ) - -def make_token_names(embs): - all_tokens = [] - for name, vec in embs.items(): - tokens = [f'emb-{name}-{i}' for i in range(len(vec))] - all_tokens.append(tokens) - return all_tokens - -def setup_tokenizer(tokenizer, embs): - reg_match = [re.compile(fr"(?:^|(?<=\s|,)){k}(?=,|\s|$)") for k in embs.keys()] - clip_keywords = [' '.join(s) for s in make_token_names(embs)] - - def parse_prompt(prompt: str): - for m, v in zip(reg_match, clip_keywords): - prompt = m.sub(v, prompt) - return prompt - - def prepare_for_tokenization(self, text: str, is_split_into_words: bool = False, **kwargs): - text = parse_prompt(text) - r = original_prepare_for_tokenization(text, is_split_into_words, **kwargs) - return r - tokenizer.prepare_for_tokenization = prepare_for_tokenization.__get__(tokenizer, CLIPTokenizer) - return [t for sublist in make_token_names(embs) for t in sublist] - - -def convert_size(size_bytes): - if size_bytes == 0: - return "0B" - size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") - i = int(math.floor(math.log(size_bytes, 1024))) - p = math.pow(1024, i) - s = round(size_bytes / p, 2) - return "%s %s" % (s, size_name[i]) - -def inference( - prompt, - guidance, - steps, - width=512, - height=512, - seed=0, - neg_prompt="", - state=None, - g_strength=0.4, - img_input=None, - i2i_scale=0.5, - hr_enabled=False, - hr_method="Latent", - hr_scale=1.5, - hr_denoise=0.8, - sampler="DPM++ 2M Karras", - embs=None, - model=None, - lora_state=None, - lora_scale=None, -): - if seed is None or seed == 0: - seed = random.randint(0, 2147483647) - - pipe = setup_model(model, lora_state, lora_scale) - generator = torch.Generator("cuda").manual_seed(int(seed)) - start_time = time.time() - - sampler_name, sampler_opt = None, None - for label, funcname, options in samplers_k_diffusion: - if label == sampler: - sampler_name, sampler_opt = funcname, options - - tokenizer, text_encoder = pipe.tokenizer, pipe.text_encoder - if embs is not None and len(embs) > 0: - ti_embs = {} - for name, file in embs.items(): - if str(file).endswith(".pt"): - loaded_learned_embeds = torch.load(file, map_location="cpu") - else: - loaded_learned_embeds = load_file(file, device="cpu") - loaded_learned_embeds = loaded_learned_embeds["string_to_param"]["*"] if "string_to_param" in loaded_learned_embed else loaded_learned_embed - ti_embs[name] = loaded_learned_embeds - - if len(ti_embs) > 0: - tokens = setup_tokenizer(tokenizer, ti_embs) - added_tokens = tokenizer.add_tokens(tokens) - delta_weight = torch.cat([val for val in ti_embs.values()], dim=0) - - assert added_tokens == delta_weight.shape[0] - text_encoder.resize_token_embeddings(len(tokenizer)) - token_embeds = text_encoder.get_input_embeddings().weight.data - token_embeds[-delta_weight.shape[0]:] = delta_weight - - config = { - "negative_prompt": neg_prompt, - "num_inference_steps": int(steps), - "guidance_scale": guidance, - "generator": generator, - "sampler_name": sampler_name, - "sampler_opt": sampler_opt, - "pww_state": state, - "pww_attn_weight": g_strength, - "start_time": start_time, - "timeout": timeout, - } - - if img_input is not None: - ratio = min(height / img_input.height, width / img_input.width) - img_input = img_input.resize( - (int(img_input.width * ratio), int(img_input.height * ratio)), Image.LANCZOS - ) - result = pipe.img2img(prompt, image=img_input, strength=i2i_scale, **config) - elif hr_enabled: - result = pipe.txt2img( - prompt, - width=width, - 
height=height, - upscale=True, - upscale_x=hr_scale, - upscale_denoising_strength=hr_denoise, - **config, - **latent_upscale_modes[hr_method], - ) - else: - result = pipe.txt2img(prompt, width=width, height=height, **config) - - end_time = time.time() - vram_free, vram_total = torch.cuda.mem_get_info() - print(f"done: model={model}, res={width}x{height}, step={steps}, time={round(end_time-start_time, 2)}s, vram_alloc={convert_size(vram_total-vram_free)}/{convert_size(vram_total)}") - return gr.Image.update(result[0][0], label=f"Initial Seed: {seed}") - - -color_list = [] - - -def get_color(n): - for _ in range(n - len(color_list)): - color_list.append(tuple(np.random.random(size=3) * 256)) - return color_list - - -def create_mixed_img(current, state, w=512, h=512): - w, h = int(w), int(h) - image_np = np.full([h, w, 4], 255) - if state is None: - state = {} - - colors = get_color(len(state)) - idx = 0 - - for key, item in state.items(): - if item["map"] is not None: - m = item["map"] < 255 - alpha = 150 - if current == key: - alpha = 200 - image_np[m] = colors[idx] + (alpha,) - idx += 1 - - return image_np - - -# width.change(apply_new_res, inputs=[width, height, global_stats], outputs=[global_stats, sp, rendered]) -def apply_new_res(w, h, state): - w, h = int(w), int(h) - - for key, item in state.items(): - if item["map"] is not None: - item["map"] = resize(item["map"], w, h) - - update_img = gr.Image.update(value=create_mixed_img("", state, w, h)) - return state, update_img - - -def detect_text(text, state, width, height): - - if text is None or text == "": - return None, None, gr.Radio.update(value=None), None - - t = text.split(",") - new_state = {} - - for item in t: - item = item.strip() - if item == "": - continue - if state is not None and item in state: - new_state[item] = { - "map": state[item]["map"], - "weight": state[item]["weight"], - "mask_outsides": state[item]["mask_outsides"], - } - else: - new_state[item] = { - "map": None, - "weight": 0.5, - "mask_outsides": False - } - update = gr.Radio.update(choices=[key for key in new_state.keys()], value=None) - update_img = gr.update(value=create_mixed_img("", new_state, width, height)) - update_sketch = gr.update(value=None, interactive=False) - return new_state, update_sketch, update, update_img - - -def resize(img, w, h): - trs = transforms.Compose( - [ - transforms.ToPILImage(), - transforms.Resize(min(h, w)), - transforms.CenterCrop((h, w)), - ] - ) - result = np.array(trs(img), dtype=np.uint8) - return result - - -def switch_canvas(entry, state, width, height): - if entry == None: - return None, 0.5, False, create_mixed_img("", state, width, height) - - return ( - gr.update(value=None, interactive=True), - gr.update(value=state[entry]["weight"] if entry in state else 0.5), - gr.update(value=state[entry]["mask_outsides"] if entry in state else False), - create_mixed_img(entry, state, width, height), - ) - - -def apply_canvas(selected, draw, state, w, h): - if selected in state: - w, h = int(w), int(h) - state[selected]["map"] = resize(draw, w, h) - return state, gr.Image.update(value=create_mixed_img(selected, state, w, h)) - - -def apply_weight(selected, weight, state): - if selected in state: - state[selected]["weight"] = weight - return state - - -def apply_option(selected, mask, state): - if selected in state: - state[selected]["mask_outsides"] = mask - return state - - -# sp2, radio, width, height, global_stats -def apply_image(image, selected, w, h, strgength, mask, state): - if selected in state: - state[selected] = { 
- "map": resize(image, w, h), - "weight": strgength, - "mask_outsides": mask - } - - return state, gr.Image.update(value=create_mixed_img(selected, state, w, h)) - - -# [ti_state, lora_state, ti_vals, lora_vals, uploads] -def add_net(files, ti_state, lora_state): - if files is None: - return ti_state, "", lora_state, None - - for file in files: - item = Path(file.name) - stripedname = str(item.stem).strip() - if item.suffix == ".pt": - state_dict = torch.load(file.name, map_location="cpu") - else: - state_dict = load_file(file.name, device="cpu") - if any("lora" in k for k in state_dict.keys()): - lora_state = file.name - else: - ti_state[stripedname] = file.name - - return ( - ti_state, - lora_state, - gr.Text.update(f"{[key for key in ti_state.keys()]}"), - gr.Text.update(f"{lora_state}"), - gr.Files.update(value=None), - ) - - -# [ti_state, lora_state, ti_vals, lora_vals, uploads] -def clean_states(ti_state, lora_state): - return ( - dict(), - None, - gr.Text.update(f""), - gr.Text.update(f""), - gr.File.update(value=None), - ) - - -latent_upscale_modes = { - "Latent": {"upscale_method": "bilinear", "upscale_antialias": False}, - "Latent (antialiased)": {"upscale_method": "bilinear", "upscale_antialias": True}, - "Latent (bicubic)": {"upscale_method": "bicubic", "upscale_antialias": False}, - "Latent (bicubic antialiased)": { - "upscale_method": "bicubic", - "upscale_antialias": True, - }, - "Latent (nearest)": {"upscale_method": "nearest", "upscale_antialias": False}, - "Latent (nearest-exact)": { - "upscale_method": "nearest-exact", - "upscale_antialias": False, - }, -} - -css = """ -.finetuned-diffusion-div div{ - display:inline-flex; - align-items:center; - gap:.8rem; - font-size:1.75rem; - padding-top:2rem; -} -.finetuned-diffusion-div div h1{ - font-weight:900; - margin-bottom:7px -} -.finetuned-diffusion-div p{ - margin-bottom:10px; - font-size:94% -} -.box { - float: left; - height: 20px; - width: 20px; - margin-bottom: 15px; - border: 1px solid black; - clear: both; -} -a{ - text-decoration:underline -} -.tabs{ - margin-top:0; - margin-bottom:0 -} -#gallery{ - min-height:20rem -} -.no-border { - border: none !important; -} - """ -with gr.Blocks(css=css) as demo: - gr.HTML( - f""" -
-      Demo For LSP model
      - """ - ) - global_stats = gr.State(value={}) - - with gr.Row(): - - with gr.Column(scale=55): - model = gr.Dropdown( - choices=[k[0] for k in get_model_list()], - label="Model", - value=base_name, - ) - image_out = gr.Image(height=512) - # gallery = gr.Gallery( - # label="Generated images", show_label=False, elem_id="gallery" - # ).style(grid=[1], height="auto") - - with gr.Column(scale=45): - - with gr.Group(): - - with gr.Row(): - with gr.Column(scale=70): - - prompt = gr.Textbox( - label="Prompt", - value="best quality, masterpiece, highres , 1girl,real photo , beautiful face, magic clothes", - show_label=True, - max_lines=4, - placeholder="Enter prompt.", - ) - neg_prompt = gr.Textbox( - label="Negative Prompt", - value="simple background,monochrome ,lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits,twisting jawline, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, lowres, bad anatomy, bad hands, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, ugly,pregnant,vore,duplicate,morbid,mut ilated,tran nsexual, hermaphrodite,long neck,mutated hands,poorly drawn hands,poorly drawn face,mutation,deformed,blurry,bad anatomy,bad proportions,malformed limbs,extra limbs,cloned face,disfigured,gross proportions, missing arms, missing legs, extra arms,extra legs,pubic hair, plump,bad legs,error legs,username,blurry,bad feet", - show_label=True, - max_lines=4, - placeholder="Enter negative prompt.", - ) - - generate = gr.Button(value="Generate").style( - rounded=(False, True, True, False) - ) - - with gr.Tab("Options"): - - with gr.Group(): - - # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1) - with gr.Row(): - guidance = gr.Slider( - label="Guidance scale", value=7.5, maximum=15 - ) - steps = gr.Slider( - label="Steps", value=25, minimum=2, maximum=50, step=1 - ) - - with gr.Row(): - width = gr.Slider( - label="Width", value=512, minimum=64, maximum=1024, step=64 - ) - height = gr.Slider( - label="Height", value=512, minimum=64, maximum=1024, step=64 - ) - - sampler = gr.Dropdown( - value="DPM++ 2M Karras", - label="Sampler", - choices=[s[0] for s in samplers_k_diffusion], - ) - seed = gr.Number(label="Seed (0 = random)", value=0) - - with gr.Tab("Image to image"): - with gr.Group(): - - inf_image = gr.Image( - label="Image", height=256, tool="editor", type="pil" - ) - inf_strength = gr.Slider( - label="Transformation strength", - minimum=0, - maximum=1, - step=0.01, - value=0.5, - ) - - def res_cap(g, w, h, x): - if g: - return f"Enable upscaler: {w}x{h} to {int(w*x)}x{int(h*x)}" - else: - return "Enable upscaler" - - with gr.Tab("Hires fix"): - with gr.Group(): - - hr_enabled = gr.Checkbox(label="Enable upscaler", value=False) - hr_method = gr.Dropdown( - [key for key in latent_upscale_modes.keys()], - value="Latent", - label="Upscale method", - ) - hr_scale = gr.Slider( - label="Upscale factor", - minimum=1.0, - maximum=2.0, - step=0.1, - value=1.5, - ) - hr_denoise = gr.Slider( - label="Denoising strength", - minimum=0.0, - maximum=1.0, - step=0.1, - value=0.8, - ) - - hr_scale.change( - lambda g, x, w, h: gr.Checkbox.update( - label=res_cap(g, w, h, x) - ), - inputs=[hr_enabled, hr_scale, width, height], - outputs=hr_enabled, - queue=False, - ) - hr_enabled.change( - lambda g, x, w, h: gr.Checkbox.update( - label=res_cap(g, w, h, x) - ), - inputs=[hr_enabled, 
hr_scale, width, height], - outputs=hr_enabled, - queue=False, - ) - - with gr.Tab("Embeddings/Loras"): - - ti_state = gr.State(dict()) - lora_state = gr.State() - - with gr.Group(): - with gr.Row(): - with gr.Column(scale=90): - ti_vals = gr.Text(label="Loaded embeddings") - - with gr.Row(): - with gr.Column(scale=90): - lora_vals = gr.Text(label="Loaded loras") - - with gr.Row(): - - uploads = gr.Files(label="Upload new embeddings/lora") - - with gr.Column(): - lora_scale = gr.Slider( - label="Lora scale", - minimum=0, - maximum=2, - step=0.01, - value=1.0, - ) - btn = gr.Button(value="Upload") - btn_del = gr.Button(value="Reset") - - btn.click( - add_net, - inputs=[uploads, ti_state, lora_state], - outputs=[ti_state, lora_state, ti_vals, lora_vals, uploads], - queue=False, - ) - btn_del.click( - clean_states, - inputs=[ti_state, lora_state], - outputs=[ti_state, lora_state, ti_vals, lora_vals, uploads], - queue=False, - ) - - # error_output = gr.Markdown() - - gr.HTML( - f""" -
-      Paint with words
-
-      Will use the following formula: w = scale * token_weight_matrix * log(1 + sigma) * max(qk).
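A minimal sketch of how a bias like this could enter cross-attention (an illustration under assumptions, not this Space's exact code: `qk` is taken to be the raw attention logits, `weight_matrix` the painted per-token region weights, and the default `scale` mirrors the app's `g_strength` of 0.4):

```python
import math
import torch

def pww_bias(qk: torch.Tensor, weight_matrix: torch.Tensor,
             sigma: float, scale: float = 0.4) -> torch.Tensor:
    # w = scale * weight_matrix * log(1 + sigma) * max(qk), added to the logits
    w = scale * weight_matrix * math.log(1.0 + sigma) * qk.max()
    return qk + w
```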
      - """ - ) - - with gr.Row(): - - with gr.Column(scale=55): - - rendered = gr.Image( - invert_colors=True, - source="canvas", - interactive=False, - image_mode="RGBA", - ) - - with gr.Column(scale=45): - - with gr.Group(): - with gr.Row(): - with gr.Column(scale=70): - g_strength = gr.Slider( - label="Weight scaling", - minimum=0, - maximum=0.8, - step=0.01, - value=0.4, - ) - - text = gr.Textbox( - lines=2, - interactive=True, - label="Token to Draw: (Separate by comma)", - ) - - radio = gr.Radio([], label="Tokens") - - sk_update = gr.Button(value="Update").style( - rounded=(False, True, True, False) - ) - - # g_strength.change(lambda b: gr.update(f"Scaled additional attn: $w = {b} \log (1 + \sigma) \std (Q^T K)$."), inputs=g_strength, outputs=[g_output]) - - with gr.Tab("SketchPad"): - - sp = gr.Image( - image_mode="L", - tool="sketch", - source="canvas", - interactive=False, - ) - - mask_outsides = gr.Checkbox( - label="Mask other areas", - value=False - ) - - strength = gr.Slider( - label="Token strength", - minimum=0, - maximum=0.8, - step=0.01, - value=0.5, - ) - - - sk_update.click( - detect_text, - inputs=[text, global_stats, width, height], - outputs=[global_stats, sp, radio, rendered], - queue=False, - ) - radio.change( - switch_canvas, - inputs=[radio, global_stats, width, height], - outputs=[sp, strength, mask_outsides, rendered], - queue=False, - ) - sp.edit( - apply_canvas, - inputs=[radio, sp, global_stats, width, height], - outputs=[global_stats, rendered], - queue=False, - ) - strength.change( - apply_weight, - inputs=[radio, strength, global_stats], - outputs=[global_stats], - queue=False, - ) - mask_outsides.change( - apply_option, - inputs=[radio, mask_outsides, global_stats], - outputs=[global_stats], - queue=False, - ) - - with gr.Tab("UploadFile"): - - sp2 = gr.Image( - image_mode="L", - source="upload", - shape=(512, 512), - ) - - mask_outsides2 = gr.Checkbox( - label="Mask other areas", - value=False, - ) - - strength2 = gr.Slider( - label="Token strength", - minimum=0, - maximum=0.8, - step=0.01, - value=0.5, - ) - - apply_style = gr.Button(value="Apply") - apply_style.click( - apply_image, - inputs=[sp2, radio, width, height, strength2, mask_outsides2, global_stats], - outputs=[global_stats, rendered], - queue=False, - ) - - width.change( - apply_new_res, - inputs=[width, height, global_stats], - outputs=[global_stats, rendered], - queue=False, - ) - height.change( - apply_new_res, - inputs=[width, height, global_stats], - outputs=[global_stats, rendered], - queue=False, - ) - - # color_stats = gr.State(value={}) - # text.change(detect_color, inputs=[sp, text, color_stats], outputs=[color_stats, rendered]) - # sp.change(detect_color, inputs=[sp, text, color_stats], outputs=[color_stats, rendered]) - - inputs = [ - prompt, - guidance, - steps, - width, - height, - seed, - neg_prompt, - global_stats, - g_strength, - inf_image, - inf_strength, - hr_enabled, - hr_method, - hr_scale, - hr_denoise, - sampler, - ti_state, - model, - lora_state, - lora_scale, - ] - outputs = [image_out] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - -print(f"Space built in {time.time() - start_time:.2f} seconds") -# demo.launch(share=True) -demo.launch(enable_queue=True, server_name="0.0.0.0", server_port=7860) \ No newline at end of file diff --git a/spaces/JoanGiner/DataDoc_Analyzer/app.py b/spaces/JoanGiner/DataDoc_Analyzer/app.py deleted file mode 100644 index 
8cf3a85338c63fd6e30e1979b2d4b353a237a402..0000000000000000000000000000000000000000 --- a/spaces/JoanGiner/DataDoc_Analyzer/app.py +++ /dev/null @@ -1,269 +0,0 @@ -import openai -import gradio as gr -from langchain.embeddings import OpenAIEmbeddings -from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter -from langchain.vectorstores.faiss import FAISS -from langchain.chains.question_answering import load_qa_chain -from langchain.chains import LLMChain -from langchain.llms import OpenAI -from langchain import PromptTemplate -from langchain.docstore.document import Document -import pandas as pd -import os -import scipdf ## You need a Gorbid service available -import tabula ## You need to have the Java Tabula installed in the environment -from gradio import DataFrame -import asyncio -from transformers import pipeline -from dotenv import load_dotenv -import json -from src.extractor import Extractor -load_dotenv() - -## You api key from vendors or hugginface -#openai.api_key=os.getenv("OPEN_AI_API_KEY") -#LLMClient = OpenAI(model_name='text-davinci-003', openai_api_key=openai.api_key,temperature=0) -extractor = Extractor() - -# Define function to handle the Gradio interface -async def extraction(input_file, apikey, dimension): - # Build the chains - chain_incontext, chain_table = extractor.build_chains(apikey) - # Prepare the data - docsearch = await extractor.prepare_data(input_file, chain_table, apikey) - # Extract dimensions - if (dimension == "annotation"): - results, completeness_report = await extractor.get_annotation_dimension(docsearch,chain_incontext, retrieved_docs=10) - elif (dimension == "gathering"): - results, completeness_report = await extractor.get_gathering_dimension(docsearch,chain_incontext, retrieved_docs=10) - elif (dimension == "uses"): - results, completeness_report = await extractor.get_uses_dimension(docsearch,chain_incontext, retrieved_docs=10) - elif (dimension == "contrib"): - results, completeness_report = await extractor.get_contributors_dimension(docsearch,chain_incontext, retrieved_docs=10) - elif (dimension == "comp"): - results, completeness_report = await extractor.get_composition_dimension(docsearch,chain_incontext, retrieved_docs=10) - elif (dimension == "social"): - results, completeness_report = await extractor.get_social_concerns_dimension(docsearch,chain_incontext, retrieved_docs=10) - elif (dimension == "dist"): - results, completeness_report = await extractor.get_distribution_dimension(docsearch,chain_incontext, retrieved_docs=10) - # Get completeness report - #completeness_report = extractor.postprocessing(results) - return results, completeness_report - -async def ui_extraction(input_file, apikey, dimension): - if (input_file == None): - raise gr.Error("Please upload a data paper") - if (input_file.name.split(".")[-1] != "pdf"): - raise gr.Error("This is not a data paper!, please upload it in .pdf format") - file_name = input_file.name.split("/")[-1] - results, completeness_report = await extractor.extraction(file_name, input_file.name, apikey, dimension) - # Build results in the correct format for the Gradio front-end - results = pd.DataFrame(results, columns=['Dimension', 'Results']) - return results, gr.update(value=pd.DataFrame(completeness_report['report'],columns=['Completeness report: '+str(completeness_report['completeness'])+'%']), visible=True) - -async def complete(input_file): - file_name = input_file.name.split("/")[-1] - # Build the chains - chain_incontext, chain_table = 
extractor.build_chains(apikey=os.getenv("OPEN_AI_API_KEY")) - # Prepare the data - docsearch = await extractor.prepare_data(file_name, input_file.name, chain_table, apikey=os.getenv("OPEN_AI_API_KEY")) - #Retrieve dimensions - results = await asyncio.gather(extractor.get_annotation_dimension(docsearch,chain_incontext, retrieved_docs=10), - extractor.get_gathering_dimension(docsearch,chain_incontext, retrieved_docs=10), - extractor.get_uses_dimension(docsearch,chain_incontext, retrieved_docs=10), - extractor.get_contributors_dimension(docsearch,chain_incontext, retrieved_docs=10), - extractor.get_composition_dimension(docsearch,chain_incontext, retrieved_docs=10), - extractor.get_social_concerns_dimension(docsearch,chain_incontext, retrieved_docs=10), - extractor.get_distribution_dimension(docsearch,chain_incontext, retrieved_docs=10)) - # Get completeness report from the results - warnings = [] - extracts = [] - for result in results: - extracts.append(result[0]) - warnings.append(gr.update(value=pd.DataFrame(result[1]['report'],columns=['Completeness report: '+str(result[1]['completeness'])+'%']), visible=True)) - extracts.extend(warnings) - return extracts - -## Building the layout of the app -css = """.table-wrap.scroll-hide.svelte-8hrj8a.no-wrap { - white-space: normal; -} -#component-7 .wrap.svelte-xwlu1w { - min-height: var(--size-40); -} -div#component-2 h2 { - color: var(--block-label-text-color); - text-align: center; - border-radius: 7px; - text-align: center; - margin: 0 15% 0 15%; -} -div#component-5 { - border: 1px solid var(--border-color-primary); - border-radius: 0 0px 10px 10px; - padding: 20px; -} -.gradio-container.gradio-container-3-26-0.svelte-ac4rv4.app { - max-width: 850px; -} -div#component-6 { - min-height: 150px; -} -button#component-17 { - color: var(--block-label-text-color); -} -.gradio-container.gradio-container-3-26-0.svelte-ac4rv4.app { - max-width: 1100px; -} -#component-9 .wrap.svelte-xwlu1w { - min-height: var(--size-40); -} -div#component-11 { - height: var(--size-40); -} -div#component-9 { - border: 1px solid grey; - border-radius: 10px; - padding: 3px; - text-align: center; -} -""" - -with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo: - with gr.Row(): - gr.Markdown("## DataDoc Analyzer") - with gr.Row(): - gr.Markdown("""Extract, in a structured manner, the **[general guidelines](https://knowingmachines.org/reading-list#dataset_documentation_practices)** from the ML community about dataset documentation practices from its scientific documentation. Study and analyze scientific data published in peer-review journals such as: **[Nature's Scientific Data](https://www.nature.com/sdata/)** and **[Data-in-Brief](https://www.data-in-brief.com)**. Here you have a **[complete list](https://zenodo.org/record/7082126#.ZDaf-OxBz0p)** of data journals suitable to be analyzed with this tool. - """) - - with gr.Row(): - - with gr.Column(): - fileinput = gr.File(label="Upload the dataset documentation"), - - with gr.Column(): - gr.Markdown("""

      Instructions:

      - ⤵ Try the examples at the bottom - then - ⇨ Set your OpenAI API key - ⇦ Upload your data paper (in PDF or TXT) - ⇩ Click the get-insights button in one of the tabs! - """) - with gr.Column(): - apikey_elem = gr.Text(label="OpenAI API key", type="password") - # gr.Markdown(""" - #

      Improving your data and assessing your dataset documentation

      - # The generated warnings also let you quickly check the completeness of the documentation and spot gaps in the document - #

      Performing studies over scientific data

      - # If you need to analyze a large number of documents, we provide an API that can be used programmatically. Documentation on how to use it is at the bottom of the page. """) - with gr.Row(): - with gr.Tab("Annotation"): - - gr.Markdown("""In this dimension, you can get information regarding the annotation process of the data: Extract a description of the process and infer its type. Extract the labels and information about the annotation team, the infrastructure used to annotate the data, and the validation process applied to the labels.""") - result_anot = gr.DataFrame(headers=["dimension","result"],type="array",label="Results of the extraction:") - alerts_anot = gr.DataFrame(headers=["warnings"],type="array", visible=False) - button_annotation = gr.Button("Get the annotation process insights!") - - with gr.Tab("Gathering"): - gr.Markdown("""In this dimension, we get information regarding the collection process of the data: We provide a description of the process and infer its type from the documentation. Then we extract information about the collection team, the infrastructure used to collect the data, and the sources. We also get the timeframe of the data collection and its geolocalization.""") - result_gather = gr.DataFrame(headers=["dimension","result"],type="array",label="Results of the extraction:") - alerts_gather = gr.DataFrame(headers=["warnings"],type="array", visible=False) - button_gathering = gr.Button("Get the gathering process insights!") - with gr.Tab("Uses"): - gr.Markdown("""In this dimension, we extract the design intentions of the authors: the purposes, the gaps, and the ML tasks (taken from Hugging Face) the dataset is intended for. We also get the use recommendations and the ML benchmarks, if the dataset has been tested with them.""") - result_uses = gr.DataFrame(headers=["dimension","result"],type="array",label="Results of the extraction:") - alerts_uses = gr.DataFrame(headers=["warnings"],type="array", visible=False) - button_uses = gr.Button("Get the uses of the dataset!") - with gr.Tab("Contributors"): - gr.Markdown("""In this dimension, we extract all the contributors, funding information, and maintenance of the dataset.""") - result_contrib = gr.DataFrame(headers=["dimension","result"],type="array",label="Results of the extraction:") - alerts_contrib = gr.DataFrame(headers=["warnings"],type="array", visible=False) - button_contrib = gr.Button("Get the contributors of the dataset!") - - with gr.Tab("Composition"): - gr.Markdown("""In this dimension, we extract the file structure, identify the attributes of the dataset, the recommended training splits, and the relevant statistics (if provided in the documentation).""") - result_comp = gr.DataFrame(headers=["dimension","result"],type="array",label="Results of the extraction:") - alerts_comp = gr.DataFrame(headers=["warnings"],type="array", visible=False) - button_comp = gr.Button("Get the composition of the dataset!") - with gr.Tab("Social Concerns"): - gr.Markdown("""In this dimension, we extract social concerns regarding the representativeness of social groups, potential biases, sensitivity issues, and privacy issues.
""") - result_social = gr.DataFrame(headers=["dimension","result"],type="array",label="Results of the extraction:") - alerts_social = gr.DataFrame(headers=["warnings"],type="array", visible=False) - button_social = gr.Button("Get the Social Cocerns!") - - with gr.Tab("Distribution"): - gr.Markdown("""In this dimension, we aim to extract the legal conditions under the dataset is released) """) - result_distri = gr.DataFrame(headers=["dimension","result"],type="array",label="Results of the extraction:") - alerts_distribution = gr.DataFrame(headers=["warning"],type="array", visible=False) - button_dist = gr.Button("Get the Distribution!") - with gr.Row(): - examples = gr.Examples( - examples=["sources/Nature-Scientific-Data/A whole-body FDG-PET:CT.pdf","sources/Nature-Scientific-Data/Lontar-Manuscripts.pdf"], - inputs=[fileinput[0]], - fn=complete, - outputs=[ - result_anot, - result_gather, - result_uses, - result_contrib, - result_comp, - result_social, - result_distri, - alerts_anot, - alerts_gather, - alerts_uses, - alerts_contrib, - alerts_comp, - alerts_social, - alerts_distribution], - cache_examples=True) - button_complete = gr.Button("Get all the dimensions", visible=False) - allres = gr.Text(visible=False) - ## Events of the apps - button_annotation.click(ui_extraction,inputs=[fileinput[0],apikey_elem,gr.State(value="annotation")],outputs=[result_anot,alerts_anot]) - button_gathering.click(ui_extraction,inputs=[fileinput[0],apikey_elem,gr.State("gathering") ],outputs=[result_gather,alerts_gather]) - button_uses.click(ui_extraction,inputs=[fileinput[0],apikey_elem,gr.State("uses") ],outputs=[result_uses,alerts_uses]) - button_contrib.click(ui_extraction,inputs=[fileinput[0],apikey_elem,gr.State("contrib") ],outputs=[result_contrib,alerts_contrib]) - button_comp.click(ui_extraction,inputs=[fileinput[0],apikey_elem,gr.State("comp") ],outputs=[result_comp,alerts_comp]) - button_social.click(ui_extraction,inputs=[fileinput[0],apikey_elem,gr.State("social") ],outputs=[result_social,alerts_social]) - button_dist.click(ui_extraction,inputs=[fileinput[0],apikey_elem,gr.State("dist") ],outputs=[result_distri,alerts_distribution]) - - - ## API endpoints - #api_annotation = gr.Button(visible=False) - #api_annotation.click(api_extraction,inputs=[fileinput[0],apikey_elem,gr.State(value="annotation")],outputs=[result_anot,alerts_anot], api_name="annotation") - #api_gathering = gr.Button(visible=False) - #api_gathering.click(api_extraction,inputs=[fileinput[0],apikey_elem,gr.State(value="gathering")],outputs=[result_anot,alerts_anot], api_name="gathering") - #api_uses = gr.Button(visible=False) - #api_uses.click(api_extraction,inputs=[fileinput[0],apikey_elem,gr.State(value="uses")],outputs=[result_anot,alerts_anot], api_name="uses") - # api_contrib = gr.Button(visible=False) - # api_contrib.click(api_extraction,inputs=[fileinput[0],apikey_elem,gr.State(value="contrib")],outputs=[result_anot,alerts_anot], api_name="contrib") - #api_comp = gr.Button(visible=False) - #api_comp.click(api_extraction,inputs=[fileinput[0],apikey_elem,gr.State(value="comp")],outputs=[result_anot,alerts_anot], api_name="composition") - #api_social = gr.Button(visible=False) - #api_social.click(api_extraction,inputs=[fileinput[0],apikey_elem,gr.State(value="social")],outputs=[result_anot,alerts_anot], api_name="social") - #api_dist = gr.Button(visible=False) - #api_dist.click(api_extraction,inputs=[fileinput[0],apikey_elem,gr.State(value="dist")],outputs=[result_anot,alerts_anot], api_name="dist") - - 
#button_complete.click(api_extraction,inputs=[fileinput[0],apikey_elem,"annotation"],outputs=allres, api_name="annotation") - #button_complete.click(api_extraction,inputs=[fileinput[0],apikey_elem,"annotation"],outputs=allres, api_name="annotation") - #button_complete.click(api_extraction,inputs=[fileinput[0],apikey_elem,"annotation"],outputs=allres, api_name="annotation") - #button_complete.click(api_extraction,inputs=[fileinput[0],apikey_elem,"annotation"],outputs=allres, api_name="annotation") - #button_complete.click(api_extraction,inputs=[fileinput[0],apikey_elem,"annotation"],outputs=allres, api_name="annotation") - #button_complete.click(api_extraction,inputs=[fileinput[0],apikey_elem,"annotation"],outputs=allres, api_name="annotation") - #button_complete.click(api_extraction,inputs=[fileinput[0],apikey_elem,"annotation"],outputs=allres, api_name="annotation") - #button_complete.click(api_extraction,inputs=[fileinput[0],apikey_elem,"annotation"],outputs=allres, api_name="annotation") - - - # Run the app - #demo.queue(concurrency_count=5,max_size=20).launch() - demo.launch(share=False,show_api=False) - diff --git a/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/model_components/network.py b/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/model_components/network.py deleted file mode 100644 index c35b32a594ecaed2284ba0c8b9ff133a7297a3dc..0000000000000000000000000000000000000000 --- a/spaces/KAIST-Geometric-AI-Lab/salad-demo/salad/model_components/network.py +++ /dev/null @@ -1,229 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from dotmap import DotMap -from salad.model_components.simple_module import TimePointWiseEncoder, TimestepEmbedder - - -from salad.model_components.transformer import ( - PositionalEncoding, - TimeTransformerDecoder, - TimeTransformerEncoder, -) - -class UnCondDiffNetwork(nn.Module): - def __init__(self, input_dim, residual, **kwargs): - """ - Transformer Encoder. 
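-        Denoises a set of latent tokens: given noisy latents x [B,G,D] and the
-        diffusion timestep beta [B], it predicts the noise eta [B,G,D].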
- """ - super().__init__() - self.input_dim = input_dim - self.residual = residual - self.__dict__.update(kwargs) - self.hparams = DotMap(self.__dict__) - - self._build_model() - - def _build_model(self): - self.act = F.leaky_relu - if self.hparams.get("use_timestep_embedder"): - self.time_embedder = TimestepEmbedder(self.hparams.timestep_embedder_dim) - dim_ctx = self.hparams.timestep_embedder_dim - else: - dim_ctx = 3 - - """ - Encoder part - """ - enc_dim = self.hparams.embedding_dim - self.embedding = nn.Linear(self.hparams.input_dim, enc_dim) - if not self.hparams.get("encoder_type"): - self.encoder = TimeTransformerEncoder( - enc_dim, - dim_ctx=dim_ctx, - num_heads=self.hparams.num_heads - if self.hparams.get("num_heads") - else 4, - use_time=True, - num_layers=self.hparams.enc_num_layers, - last_fc=True, - last_fc_dim_out=self.hparams.input_dim, - ) - else: - if self.hparams.encoder_type == "transformer": - self.encoder = TimeTransformerEncoder( - enc_dim, - dim_ctx=dim_ctx, - num_heads=self.hparams.num_heads - if self.hparams.get("num_heads") - else 4, - use_time=True, - num_layers=self.hparams.enc_num_layers, - last_fc=True, - last_fc_dim_out=self.hparams.input_dim, - dropout=self.hparams.get("attn_dropout", 0.0) - ) - else: - raise ValueError - - def forward(self, x, beta): - """ - Input: - x: [B,G,D] latent - beta: B - Output: - eta: [B,G,D] - """ - B, G = x.shape[:2] - if self.hparams.get("use_timestep_embedder"): - time_emb = self.time_embedder(beta).unsqueeze(1) - else: - beta = beta.view(B, 1, 1) - time_emb = torch.cat( - [beta, torch.sin(beta), torch.cos(beta)], dim=-1 - ) # [B,1,3] - - ctx = time_emb - x_emb = self.embedding(x) - - out = self.encoder(x_emb, ctx=ctx) - - if self.hparams.residual: - out = out + x - return out - - -class CondDiffNetwork(nn.Module): - def __init__(self, input_dim, residual, **kwargs): - """ - Transformer Encoder + Decoder. 
- """ - super().__init__() - self.input_dim = input_dim - self.residual = residual - self.__dict__.update(kwargs) - self.hparams = DotMap(self.__dict__) - - self._build_model() - - def _build_model(self): - self.act = F.leaky_relu - if self.hparams.get("use_timestep_embedder"): - self.time_embedder = TimestepEmbedder(self.hparams.timestep_embedder_dim) - dim_ctx = self.hparams.timestep_embedder_dim - else: - dim_ctx = 3 - """ - Encoder part - """ - enc_dim = self.hparams.context_embedding_dim - self.context_embedding = nn.Linear(self.hparams.context_dim, enc_dim) - if self.hparams.encoder_type == "transformer": - self.encoder = TimeTransformerEncoder( - enc_dim, - 3, - num_heads=4, - use_time=self.hparams.encoder_use_time, - num_layers=self.hparams.enc_num_layers - if self.hparams.get("enc_num_layers") - else 3, - last_fc=False, - ) - - elif self.hparams.encoder_type == "pointwise": - self.encoder = TimePointWiseEncoder( - enc_dim, - dim_ctx=None, - use_time=self.hparams.encoder_use_time, - num_layers=self.hparams.enc_num_layers, - ) - else: - raise ValueError - - """ - Decoder part - """ - dec_dim = self.hparams.embedding_dim - input_dim = self.hparams.input_dim - self.query_embedding = nn.Linear(self.hparams.input_dim, dec_dim) - if self.hparams.decoder_type == "transformer_decoder": - self.decoder = TimeTransformerDecoder( - dec_dim, - enc_dim, - dim_ctx=dim_ctx, - num_heads=4, - last_fc=True, - last_fc_dim_out=input_dim, - num_layers=self.hparams.dec_num_layers - if self.hparams.get("dec_num_layers") - else 3, - ) - elif self.hparams.decoder_type == "transformer_encoder": - self.decoder = TimeTransformerEncoder( - dec_dim, - dim_ctx=enc_dim + dim_ctx, - num_heads=4, - last_fc=True, - last_fc_dim_out=input_dim, - num_layers=self.hparams.dec_num_layers - if self.hparams.get("dec_num_layers") - else 3, - ) - else: - raise ValueError - - def forward(self, x, beta, context): - """ - Input: - x: [B,G,D] intrinsic - beta: B - context: [B,G,D2] or [B, D2] condition - Output: - eta: [B,G,D] - """ - # print(f"x: {x.shape} context: {context.shape} beta: {beta.shape}") - B, G = x.shape[:2] - - if self.hparams.get("use_timestep_embedder"): - time_emb = self.time_embedder(beta).unsqueeze(1) - else: - beta = beta.view(B, 1, 1) - time_emb = torch.cat( - [beta, torch.sin(beta), torch.cos(beta)], dim=-1 - ) # [B,1,3] - ctx = time_emb - """ - Encoding - """ - cout = self.context_embedding(context) - cout = self.encoder(cout, ctx=ctx if self.hparams.encoder_use_time else None) - - if cout.ndim == 2: - cout = cout.unsqueeze(1).expand(-1, G, -1) - - """ - Decoding - """ - out = self.query_embedding(x) - if self.hparams.get("use_pos_encoding"): - out = self.pos_encoding(out) - - if self.hparams.decoder_type == "transformer_encoder": - try: - ctx = ctx.expand(-1, G, -1) - if cout.ndim == 2: - cout = cout.unsqueeze(1) - cout = cout.expand(-1, G, -1) - ctx = torch.cat([ctx, cout], -1) - except Exception as e: - print(e, G, ctx.shape, cout.shape) - out = self.decoder(out, ctx=ctx) - else: - out = self.decoder(out, cout, ctx=ctx) - - # if hasattr(self, "last_fc"): - # out = self.last_fc(out) - - if self.hparams.residual: - out = out + x - return out diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/README.md b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/README.md deleted file mode 100644 index 447c92b7ab8a60ae4d96ebaedd67829818503ba2..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: 
MockingBird -emoji: 🔥 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.1.3 -app_file: app.py -pinned: false -license: mit -duplicated_from: lewiswu1209/MockingBird ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Kimata/multimodal_deepfake_detection/data/augmentation_utils.py b/spaces/Kimata/multimodal_deepfake_detection/data/augmentation_utils.py deleted file mode 100644 index 9ed98fda861f3a1fcf67de534c79a76b55575163..0000000000000000000000000000000000000000 --- a/spaces/Kimata/multimodal_deepfake_detection/data/augmentation_utils.py +++ /dev/null @@ -1,88 +0,0 @@ -import cv2 -import librosa -import numpy as np -import albumentations -from albumentations import (Compose, ImageCompression, GaussNoise, HorizontalFlip, - PadIfNeeded, OneOf,ToGray, ShiftScaleRotate, GaussianBlur, - RandomBrightnessContrast, FancyPCA, HueSaturationValue, BasicTransform) - - -class AudioTransform(BasicTransform): - """ Transform for audio task. This is the main class where we override the targets and update params function for our need""" - @property - def targets(self): - return {"data": self.apply} - - def update_params(self, params, **kwargs): - if hasattr(self, "interpolation"): - params["interpolation"] = self.interpolation - if hasattr(self, "fill_value"): - params["fill_value"] = self.fill_value - return params - -class TimeShifting(AudioTransform): - """ Do time shifting of audio """ - def __init__(self, always_apply=False, p=0.5): - super(TimeShifting, self).__init__(always_apply, p) - - def apply(self,data,**params): - ''' - data : ndarray of audio timeseries - ''' - start_ = int(np.random.uniform(-80000,80000)) - if start_ >= 0: - audio_time_shift = np.r_[data[start_:], np.random.uniform(-0.001,0.001, start_)] - else: - audio_time_shift = np.r_[np.random.uniform(-0.001,0.001, -start_), data[:start_]] - - return audio_time_shift - -class PitchShift(AudioTransform): - """ Do time shifting of audio """ - def __init__(self, always_apply=False, p=0.5 , n_steps=None): - super(PitchShift, self).__init__(always_apply, p) - ''' - nsteps here is equal to number of semitones - ''' - - self.n_steps = n_steps - - def apply(self,data,**params): - ''' - data : ndarray of audio timeseries - ''' - return librosa.effects.pitch_shift(data,sr=16000,n_steps=self.n_steps) - - -class AddGaussianNoise(AudioTransform): - """ Do time shifting of audio """ - def __init__(self, always_apply=False, p=0.5): - super(AddGaussianNoise, self).__init__(always_apply, p) - - - def apply(self,data,**params): - ''' - data : ndarray of audio timeseries - ''' - noise = np.random.randn(len(data)) - data_wn = data + 0.005*noise - return data_wn - - -create_frame_transforms = Compose([ - ImageCompression(quality_lower=60, quality_upper=100, p=0.5), - GaussNoise(p=0.1), - GaussianBlur(blur_limit=3, p=0.05), - HorizontalFlip(), - PadIfNeeded(min_height=256, min_width=256, border_mode=cv2.BORDER_CONSTANT), - OneOf([RandomBrightnessContrast(), FancyPCA(), HueSaturationValue()], p=0.7), - ToGray(p=0.2), - ShiftScaleRotate(shift_limit=0.1, scale_limit=0.2, rotate_limit=10, border_mode=cv2.BORDER_CONSTANT, p=0.5),]) - - - -create_spec_transforms = albumentations.Compose([ - TimeShifting(p=0.9), # here not p=1.0 because your nets should get some difficulties - AddGaussianNoise(p=0.8), - PitchShift(p=0.5,n_steps=4) - ]) diff --git a/spaces/KyanChen/FunSR/models/baselines/diinn.py b/spaces/KyanChen/FunSR/models/baselines/diinn.py deleted file mode 100644 index 
699f7ae078128eeb189d3de2b8b01816517cdd3a..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/models/baselines/diinn.py +++ /dev/null @@ -1,181 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import models -from models import register - - -@register('diinn') -class DIINN(nn.Module): - def __init__(self, - encoder_spec, - mode=3, init_q=False): - super().__init__() - - self.encoder = models.make(encoder_spec) - self.decoder = ImplicitDecoder(mode=mode, init_q=init_q) - - def forward(self, x, size, bsize=None): - x = self.encoder(x) - x = self.decoder(x, size, bsize) - return x - - -class SineAct(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return torch.sin(x) - - -def patch_norm_2d(x, kernel_size=3): - # B, C, H, W = x.shape - # var, mean = torch.var_mean(F.unfold(x, kernel_size=kernel_size, padding=padding).view(B, C,kernel_size**2, H, W), dim=2, keepdim=False) - # return (x - mean) / torch.sqrt(var + 1e-6) - mean = F.avg_pool2d(x, kernel_size=kernel_size, padding=kernel_size // 2) - mean_sq = F.avg_pool2d(x ** 2, kernel_size=kernel_size, padding=kernel_size // 2) - var = mean_sq - mean ** 2 - return (x - mean) / (var + 1e-6) - - -class ImplicitDecoder(nn.Module): - def __init__(self, in_channels=64, hidden_dims=[256, 256, 256, 256], mode=3, init_q=False): - super().__init__() - - self.mode = mode - self.init_q = init_q - - last_dim_K = in_channels * 9 - - if self.init_q: - self.first_layer = nn.Sequential(nn.Conv2d(3, in_channels * 9, 1), - SineAct()) - last_dim_Q = in_channels * 9 - else: - last_dim_Q = 3 - - self.K = nn.ModuleList() - self.Q = nn.ModuleList() - if self.mode == 1: - for hidden_dim in hidden_dims: - self.K.append(nn.Sequential(nn.Conv2d(last_dim_K, hidden_dim, 1), - nn.ReLU())) - self.Q.append(nn.Sequential(nn.Conv2d(last_dim_Q, hidden_dim, 1), - SineAct())) - last_dim_K = hidden_dim - last_dim_Q = hidden_dim - elif self.mode == 2: - for hidden_dim in hidden_dims: - self.K.append(nn.Sequential(nn.Conv2d(last_dim_K, hidden_dim, 1), - nn.ReLU())) - self.Q.append(nn.Sequential(nn.Conv2d(last_dim_Q, hidden_dim, 1), - SineAct())) - last_dim_K = hidden_dim + in_channels * 9 - last_dim_Q = hidden_dim - elif self.mode == 3: - for hidden_dim in hidden_dims: - self.K.append(nn.Sequential(nn.Conv2d(last_dim_K, hidden_dim, 1), - nn.ReLU())) - self.Q.append(nn.Sequential(nn.Conv2d(last_dim_Q, hidden_dim, 1), - SineAct())) - last_dim_K = hidden_dim + in_channels * 9 - last_dim_Q = hidden_dim - elif self.mode == 4: - for hidden_dim in hidden_dims: - self.K.append(nn.Sequential(nn.Conv2d(last_dim_K, hidden_dim, 1), - nn.ReLU())) - self.Q.append(nn.Sequential(nn.Conv2d(last_dim_Q, hidden_dim, 1), - SineAct())) - last_dim_K = hidden_dim + in_channels * 9 - last_dim_Q = hidden_dim - if self.mode == 4: - self.last_layer = nn.Conv2d(hidden_dims[-1], 3, 3, padding=1, padding_mode='reflect') - else: - self.last_layer = nn.Conv2d(hidden_dims[-1], 3, 1) - - def _make_pos_encoding(self, x, size): - B, C, H, W = x.shape - H_up, W_up = size - - h_idx = -1 + 1 / H + 2 / H * torch.arange(H, device=x.device).float() - w_idx = -1 + 1 / W + 2 / W * torch.arange(W, device=x.device).float() - in_grid = torch.stack(torch.meshgrid(h_idx, w_idx, indexing='ij'), dim=0) - - h_idx_up = -1 + 1 / H_up + 2 / H_up * torch.arange(H_up, device=x.device).float() - w_idx_up = -1 + 1 / W_up + 2 / W_up * torch.arange(W_up, device=x.device).float() - up_grid = torch.stack(torch.meshgrid(h_idx_up, w_idx_up, indexing='ij'), dim=0) - - 
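-        # Decode from the *relative* offset between each upsampled pixel center
-        # and its source pixel center; 'nearest-exact' (rather than 'nearest')
-        # keeps that pairing consistent, and the offsets are rescaled to
-        # input-cell units below.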
rel_grid = (up_grid - F.interpolate(in_grid.unsqueeze(0), size=(H_up, W_up), - mode='nearest-exact')) # important! mode='nearest' gives inconsistent results - rel_grid[:, 0, :, :] *= H - rel_grid[:, 1, :, :] *= W - - return rel_grid.contiguous().detach() - - def step(self, x, syn_inp): - if self.init_q: - syn_inp = self.first_layer(syn_inp) - x = syn_inp * x - if self.mode == 1: - k = self.K[0](x) - q = k * self.Q[0](syn_inp) - - for i in range(1, len(self.K)): - k = self.K[i](k) - q = k * self.Q[i](q) - - q = self.last_layer(q) - return q - elif self.mode == 2: - k = self.K[0](x) - q = k * self.Q[0](syn_inp) - for i in range(1, len(self.K)): - k = self.K[i](torch.cat([k, x], dim=1)) - q = k * self.Q[i](q) - q = self.last_layer(q) - return q - elif self.mode == 3: - k = self.K[0](x) - q = k * self.Q[0](syn_inp) - # q = k + self.Q[0](syn_inp) - for i in range(1, len(self.K)): - k = self.K[i](torch.cat([q, x], dim=1)) - q = k * self.Q[i](q) - # q = k + self.Q[i](q) - q = self.last_layer(q) - return q - elif self.mode == 4: - k = self.K[0](x) - q = k * self.Q[0](syn_inp) - for i in range(1, len(self.K)): - k = self.K[i](torch.cat([q, x], dim=1)) - q = k * self.Q[i](q) - q = self.last_layer(q) - return q - - def batched_step(self, x, syn_inp, bsize): - with torch.no_grad(): - h, w = syn_inp.shape[-2:] - ql = 0 - preds = [] - while ql < w: - qr = min(ql + bsize // h, w) - pred = self.step(x[:, :, :, ql: qr], syn_inp[:, :, :, ql: qr]) - preds.append(pred) - ql = qr - pred = torch.cat(preds, dim=-1) - return pred - - def forward(self, x, size, bsize=None): - B, C, H_in, W_in = x.shape - rel_coord = self._make_pos_encoding(x, size).expand(B, -1, *size) # 2 - ratio = x.new_tensor([(H_in * W_in) / (size[0] * size[1])]).view(1, -1, 1, 1).expand(B, -1, *size) # 2 - syn_inp = torch.cat([rel_coord, ratio], dim=1) - x = F.interpolate(F.unfold(x, 3, padding=1).view(B, C * 9, H_in, W_in), size=syn_inp.shape[-2:], - mode='nearest-exact') - if bsize is None: - pred = self.step(x, syn_inp) - else: - pred = self.batched_step(x, syn_inp, bsize) - return pred diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/__init__.py b/spaces/KyanChen/RSPrompter/mmdet/models/detectors/__init__.py deleted file mode 100644 index 666975354cf9e17e1f26e613b72a9314b845b410..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
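-# Importing each detector module here triggers its registry registration, so
-# configs can build any class listed in __all__ by name.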
-from .atss import ATSS -from .autoassign import AutoAssign -from .base import BaseDetector -from .base_detr import DetectionTransformer -from .boxinst import BoxInst -from .cascade_rcnn import CascadeRCNN -from .centernet import CenterNet -from .condinst import CondInst -from .conditional_detr import ConditionalDETR -from .cornernet import CornerNet -from .crowddet import CrowdDet -from .d2_wrapper import Detectron2Wrapper -from .dab_detr import DABDETR -from .ddod import DDOD -from .deformable_detr import DeformableDETR -from .detr import DETR -from .dino import DINO -from .fast_rcnn import FastRCNN -from .faster_rcnn import FasterRCNN -from .fcos import FCOS -from .fovea import FOVEA -from .fsaf import FSAF -from .gfl import GFL -from .grid_rcnn import GridRCNN -from .htc import HybridTaskCascade -from .kd_one_stage import KnowledgeDistillationSingleStageDetector -from .lad import LAD -from .mask2former import Mask2Former -from .mask_rcnn import MaskRCNN -from .mask_scoring_rcnn import MaskScoringRCNN -from .maskformer import MaskFormer -from .nasfcos import NASFCOS -from .paa import PAA -from .panoptic_fpn import PanopticFPN -from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor -from .point_rend import PointRend -from .queryinst import QueryInst -from .reppoints_detector import RepPointsDetector -from .retinanet import RetinaNet -from .rpn import RPN -from .rtmdet import RTMDet -from .scnet import SCNet -from .semi_base import SemiBaseDetector -from .single_stage import SingleStageDetector -from .soft_teacher import SoftTeacher -from .solo import SOLO -from .solov2 import SOLOv2 -from .sparse_rcnn import SparseRCNN -from .tood import TOOD -from .trident_faster_rcnn import TridentFasterRCNN -from .two_stage import TwoStageDetector -from .vfnet import VFNet -from .yolact import YOLACT -from .yolo import YOLOV3 -from .yolof import YOLOF -from .yolox import YOLOX - -__all__ = [ - 'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN', - 'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN', - 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS', - 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF', - 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT', - 'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO', - 'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX', - 'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD', - 'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher', - 'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst', - 'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR' -] diff --git a/spaces/LZRi/LZR-Bert-VITS2/bert_gen.py b/spaces/LZRi/LZR-Bert-VITS2/bert_gen.py deleted file mode 100644 index 467655b2c4171608ad690fe7dec350db85f84f1b..0000000000000000000000000000000000000000 --- a/spaces/LZRi/LZR-Bert-VITS2/bert_gen.py +++ /dev/null @@ -1,53 +0,0 @@ -import torch -from torch.utils.data import DataLoader -from multiprocessing import Pool -import commons -import utils -from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate -from tqdm import tqdm -import warnings - -from text import cleaned_text_to_sequence, get_bert - -config_path = 'configs/config.json' -hps = utils.get_hparams_from_file(config_path) - -def process_line(line): - _id, spk, language_str, text, phones, tone, word2ph = line.strip().split("|") - phone = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - 
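-    # word2ph[i]: number of phonemes produced by the i-th input unit; doubled
-    # below (with +1 on the first entry) once blanks are interspersed.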
word2ph = [int(i) for i in word2ph.split(" ")] - w2pho = [i for i in word2ph] - word2ph = [i for i in word2ph] - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - wav_path = f'{_id}' - - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - assert bert.shape[-1] == len(phone) - torch.save(bert, bert_path) - - -if __name__ == '__main__': - lines = [] - with open(hps.data.training_files, encoding='utf-8' ) as f: - lines.extend(f.readlines()) - - # with open(hps.data.validation_files, encoding='utf-8' ) as f: - # lines.extend(f.readlines()) - - with Pool(processes=2) as pool: #A100 40GB suitable config,if coom,please decrease the processess number. - for _ in tqdm(pool.imap_unordered(process_line, lines)): - pass diff --git a/spaces/LanguageBind/LanguageBind/open_clip/utils.py b/spaces/LanguageBind/LanguageBind/open_clip/utils.py deleted file mode 100644 index bb0bb8868ae1f2d31493ca32b73accd6bf1d3cdb..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/open_clip/utils.py +++ /dev/null @@ -1,89 +0,0 @@ -from itertools import repeat -import collections.abc - -import torch -from torch import nn as nn -from torchvision.ops.misc import FrozenBatchNorm2d - - -def freeze_batch_norm_2d(module, module_match={}, name=''): - """ - Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is - itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and - returned. Otherwise, the module is walked recursively and submodules are converted in place. - - Args: - module (torch.nn.Module): Any PyTorch module. 
- module_match (dict): Dictionary of full module names to freeze (all if empty) - name (str): Full module name (prefix) - - Returns: - torch.nn.Module: Resulting module - - Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 - """ - res = module - is_match = True - if module_match: - is_match = name in module_match - if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)): - res = FrozenBatchNorm2d(module.num_features) - res.num_features = module.num_features - res.affine = module.affine - if module.affine: - res.weight.data = module.weight.data.clone().detach() - res.bias.data = module.bias.data.clone().detach() - res.running_mean.data = module.running_mean.data - res.running_var.data = module.running_var.data - res.eps = module.eps - else: - for child_name, child in module.named_children(): - full_child_name = '.'.join([name, child_name]) if name else child_name - new_child = freeze_batch_norm_2d(child, module_match, full_child_name) - if new_child is not child: - res.add_module(child_name, new_child) - return res - - -# From PyTorch internals -def _ntuple(n): - def parse(x): - if isinstance(x, collections.abc.Iterable): - return x - return tuple(repeat(x, n)) - return parse - - -to_1tuple = _ntuple(1) -to_2tuple = _ntuple(2) -to_3tuple = _ntuple(3) -to_4tuple = _ntuple(4) -to_ntuple = lambda n, x: _ntuple(n)(x) - -# Replaces all linear layers with linear_replacement -# TODO: add int8 support for other linear layers including attn and convnets -def replace_linear(model, linear_replacement, include_modules=['c_fc', 'c_proj'], copy_weights=True): - for name, module in model.named_children(): - if len(list(module.children())) > 0: - replace_linear(module, linear_replacement, include_modules, copy_weights) - - if isinstance(module, torch.nn.Linear) and name in include_modules: - old_module = model._modules[name] - model._modules[name] = linear_replacement( - module.in_features, - module.out_features, - module.bias is not None, - ) - if copy_weights: - model._modules[name].weight.data.copy_(old_module.weight.data) - if model._modules[name].bias is not None: - model._modules[name].bias.data.copy_(old_module.bias) - - return model - -def convert_int8_model_to_inference_mode(model): - for m in model.modules(): - if hasattr(m, 'prepare_for_eval'): - int8_original_dtype = m.weight.dtype - m.prepare_for_eval() - m.int8_original_dtype = int8_original_dtype \ No newline at end of file diff --git a/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/maskrcnn/README.md b/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/maskrcnn/README.md deleted file mode 100644 index c6ef17e7659558a4f41834f4614d58caddcbe208..0000000000000000000000000000000000000000 --- a/spaces/Loren/Streamlit_OCR_comparator/configs/textdet/maskrcnn/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Mask R-CNN - -> [Mask R-CNN](https://arxiv.org/abs/1703.06870) - - - -## Abstract - -We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. 
Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. -
      - -## Results and models - -### CTW1500 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :----------------------------------------------------------: | :--------------: | :-----------: | :----------: | :-----: | :-------: | :----: | :-------: | :---: | :-------------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500.py) | ImageNet | CTW1500 Train | CTW1500 Test | 160 | 1600 | 0.753 | 0.712 | 0.732 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_ctw1500_20210219-96497a76.log.json) | - -### ICDAR2015 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :--------------------------------------------------------: | :--------------: | :-------------: | :------------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015.py) | ImageNet | ICDAR2015 Train | ICDAR2015 Test | 160 | 1920 | 0.783 | 0.872 | 0.825 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2015_20210219-8eb340a3.log.json) | - -### ICDAR2017 - -| Method | Pretrained Model | Training set | Test set | #epochs | Test size | Recall | Precision | Hmean | Download | -| :---------------------------------------------------------: | :--------------: | :-------------: | :-----------: | :-----: | :-------: | :----: | :-------: | :---: | :-----------------------------------------------------------: | -| [MaskRCNN](/configs/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017.py) | ImageNet | ICDAR2017 Train | ICDAR2017 Val | 160 | 1600 | 0.754 | 0.827 | 0.789 | [model](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.pth) \| [log](https://download.openmmlab.com/mmocr/textdet/maskrcnn/mask_rcnn_r50_fpn_160e_icdar2017_20210218-c6ec3ebb.log.json) | - -```{note} -We tuned parameters with the techniques in [Pyramid Mask Text Detector](https://arxiv.org/abs/1903.11800) -``` - -## Citation - -```bibtex -@INPROCEEDINGS{8237584, - author={K. {He} and G. {Gkioxari} and P. {Dollár} and R. 
{Girshick}}, - booktitle={2017 IEEE International Conference on Computer Vision (ICCV)}, - title={Mask R-CNN}, - year={2017}, - pages={2980-2988}, - doi={10.1109/ICCV.2017.322}} -``` diff --git a/spaces/LuxOAI/ChatGpt-Web/app/api/openai/typing.ts b/spaces/LuxOAI/ChatGpt-Web/app/api/openai/typing.ts deleted file mode 100644 index 2286d23124e13121371f83f3d11eff40303e79ca..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/app/api/openai/typing.ts +++ /dev/null @@ -1,9 +0,0 @@ -import type { - CreateChatCompletionRequest, - CreateChatCompletionResponse, -} from "openai"; - -export type ChatRequest = CreateChatCompletionRequest; -export type ChatResponse = CreateChatCompletionResponse; - -export type Updater<T> = (updater: (value: T) => void) => void; diff --git a/spaces/ML701G7/taim-gan/README.md b/spaces/ML701G7/taim-gan/README.md deleted file mode 100644 index 3d479bf91007a000e6e5575b044c6f3e7a655df4..0000000000000000000000000000000000000000 --- a/spaces/ML701G7/taim-gan/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Taim Gan -emoji: 🐠 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MarkMcCormack/NLP-EduTech-App/assessmentDashboard.py b/spaces/MarkMcCormack/NLP-EduTech-App/assessmentDashboard.py deleted file mode 100644 index 7a7f1cb9bcc60ea0d600ebb49c2989f774acb0d8..0000000000000000000000000000000000000000 --- a/spaces/MarkMcCormack/NLP-EduTech-App/assessmentDashboard.py +++ /dev/null @@ -1,33 +0,0 @@ -import streamlit as st -from promptTemplates import * -from utils import createComponent - -def run(): - # Title - st.title('🏫 LLM/GPT in Assessment As/Of/For Learning') - - # Define columns for layout - left, right = st.columns(2) - - with left: - st.subheader("Assessment As: Review as Actor") - - createComponent(generalAutomationTemplate, "Student Peer Review", "Student: Please enter your assignment question, assignment content and requirements") - createComponent(automatedFeedbackTemplate, "Teacher Review", "Teacher: Please enter your assignment question, assignment content and requirements") - createComponent(automatedFeedbackTemplate, "Examiner Review", "Examiner: Please enter your assignment question, assignment content and requirements") - - st.subheader("Assessment Of: Progress Tracker") - - createComponent(studentProgressTemplate, "Student Study Planner", "Please enter your module name, structure, timeline and requirements") - createComponent(selfAssessmentTemplate, "Self Assessment with Context", "Please enter your study planner and what you want to be examined on") - - with right: - st.subheader("Assessment For: Copilot Peer Review") - - createComponent(automaticRoleTask, "Automatic Role Generation", "Please enter the type of task the agent should help you complete") - createComponent(specificRoleTask, "Specific Role Generation", "Please enter the type of role the agent should adopt and how they can help you complete the task") - - st.subheader("General Assessment: Automation") - - createComponent(generalAutomationTemplate, "Generate Automation Scripts", "Please enter the type of assignment, marking schemes and what role to examine as") - createComponent(automatedFeedbackTemplate, "Automated Feedback from Existing Knowledge", "Please enter the assignment content, marking schemes and what role to examine as") \ No newline at end of file diff --git 
a/spaces/MashiroSA/sovits-emu-voice-transform/onnxexport/model_onnx.py b/spaces/MashiroSA/sovits-emu-voice-transform/onnxexport/model_onnx.py deleted file mode 100644 index e28bae95ec1e53aa05d06fc784ff86d55f228d60..0000000000000000000000000000000000000000 --- a/spaces/MashiroSA/sovits-emu-voice-transform/onnxexport/model_onnx.py +++ /dev/null @@ -1,335 +0,0 @@ -import torch -from torch import nn -from torch.nn import functional as F - -import modules.attentions as attentions -import modules.commons as commons -import modules.modules as modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -import utils -from modules.commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - kernel_size, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.gin_channels = gin_channels - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_mask, f0=None, z=None): - x = x + self.f0_emb(f0).transpose(1, 2) - x = 
self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + z * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class F0Decoder(nn.Module): - def __init__(self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=0): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.spk_channels = spk_channels - - self.prenet = nn.Conv1d(hidden_channels, hidden_channels, 3, padding=1) - self.decoder = attentions.FFT( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.f0_prenet = nn.Conv1d(1, hidden_channels, 3, padding=1) - self.cond = nn.Conv1d(spk_channels, hidden_channels, 1) - - def forward(self, x, norm_f0, x_mask, spk_emb=None): - x = torch.detach(x) - if spk_emb is not None: - x = x + self.cond(spk_emb) - x += self.f0_prenet(norm_f0) - x = self.prenet(x) * x_mask - x = self.decoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x - - -class SynthesizerTrn(nn.Module): - """ - 
Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - sampling_rate=44100, - **kwargs): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.pre = nn.Conv1d(ssl_dim, hidden_channels, kernel_size=5, padding=2) - - self.enc_p = TextEncoder( - inter_channels, - hidden_channels, - filter_channels=filter_channels, - n_heads=n_heads, - n_layers=n_layers, - kernel_size=kernel_size, - p_dropout=p_dropout - ) - hps = { - "sampling_rate": sampling_rate, - "inter_channels": inter_channels, - "resblock": resblock, - "resblock_kernel_sizes": resblock_kernel_sizes, - "resblock_dilation_sizes": resblock_dilation_sizes, - "upsample_rates": upsample_rates, - "upsample_initial_channel": upsample_initial_channel, - "upsample_kernel_sizes": upsample_kernel_sizes, - "gin_channels": gin_channels, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - self.f0_decoder = F0Decoder( - 1, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - spk_channels=gin_channels - ) - self.emb_uv = nn.Embedding(2, hidden_channels) - self.predict_f0 = False - - def forward(self, c, f0, mel2ph, uv, noise=None, g=None): - - decoder_inp = F.pad(c, [0, 0, 1, 0]) - mel2ph_ = mel2ph.unsqueeze(2).repeat([1, 1, c.shape[-1]]) - c = torch.gather(decoder_inp, 1, mel2ph_).transpose(1, 2) # [B, T, H] - - c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device) - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - x_mask = torch.unsqueeze(commons.sequence_mask(c_lengths, c.size(2)), 1).to(c.dtype) - x = self.pre(c) * x_mask + self.emb_uv(uv.long()).transpose(1, 2) - - if self.predict_f0: - lf0 = 2595. * torch.log10(1. + f0.unsqueeze(1) / 700.) 
/ 500 - norm_lf0 = utils.normalize_f0(lf0, x_mask, uv, random_scale=False) - pred_lf0 = self.f0_decoder(x, norm_lf0, x_mask, spk_emb=g) - f0 = (700 * (torch.pow(10, pred_lf0 * 500 / 2595) - 1)).squeeze(1) - - z_p, m_p, logs_p, c_mask = self.enc_p(x, x_mask, f0=f0_to_coarse(f0), z=noise) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g, f0=f0) - return o diff --git a/spaces/MathysL/AutoGPT4/autogpt/memory/__init__.py b/spaces/MathysL/AutoGPT4/autogpt/memory/__init__.py deleted file mode 100644 index 3d18704c70dfc287642b1923e6f2e1f72a5f2a62..0000000000000000000000000000000000000000 --- a/spaces/MathysL/AutoGPT4/autogpt/memory/__init__.py +++ /dev/null @@ -1,99 +0,0 @@ -from autogpt.memory.local import LocalCache -from autogpt.memory.no_memory import NoMemory - -# List of supported memory backends -# Add a backend to this list if the import attempt is successful -supported_memory = ["local", "no_memory"] - -try: - from autogpt.memory.redismem import RedisMemory - - supported_memory.append("redis") -except ImportError: - # print("Redis not installed. Skipping import.") - RedisMemory = None - -try: - from autogpt.memory.pinecone import PineconeMemory - - supported_memory.append("pinecone") -except ImportError: - # print("Pinecone not installed. Skipping import.") - PineconeMemory = None - -try: - from autogpt.memory.weaviate import WeaviateMemory - - supported_memory.append("weaviate") -except ImportError: - # print("Weaviate not installed. Skipping import.") - WeaviateMemory = None - -try: - from autogpt.memory.milvus import MilvusMemory - - supported_memory.append("milvus") -except ImportError: - # print("pymilvus not installed. Skipping import.") - MilvusMemory = None - - -def get_memory(cfg, init=False): - memory = None - if cfg.memory_backend == "pinecone": - if not PineconeMemory: - print( - "Error: Pinecone is not installed. Please install pinecone" - " to use Pinecone as a memory backend." - ) - else: - memory = PineconeMemory(cfg) - if init: - memory.clear() - elif cfg.memory_backend == "redis": - if not RedisMemory: - print( - "Error: Redis is not installed. Please install redis-py to" - " use Redis as a memory backend." - ) - else: - memory = RedisMemory(cfg) - elif cfg.memory_backend == "weaviate": - if not WeaviateMemory: - print( - "Error: Weaviate is not installed. Please install weaviate-client to" - " use Weaviate as a memory backend." - ) - else: - memory = WeaviateMemory(cfg) - elif cfg.memory_backend == "milvus": - if not MilvusMemory: - print( - "Error: Milvus sdk is not installed." - "Please install pymilvus to use Milvus as memory backend." 
- ) - else: - memory = MilvusMemory(cfg) - elif cfg.memory_backend == "no_memory": - memory = NoMemory(cfg) - - if memory is None: - memory = LocalCache(cfg) - if init: - memory.clear() - return memory - - -def get_supported_memory_backends(): - return supported_memory - - -__all__ = [ - "get_memory", - "LocalCache", - "RedisMemory", - "PineconeMemory", - "NoMemory", - "MilvusMemory", - "WeaviateMemory", -] diff --git a/spaces/Matthijs/image2reverb/README.md b/spaces/Matthijs/image2reverb/README.md deleted file mode 100644 index 956ab5811d3696d639f6e59a69ffc8ab0e615441..0000000000000000000000000000000000000000 --- a/spaces/Matthijs/image2reverb/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image2Reverb -emoji: 🎶 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/MaxReimann/Whitebox-Style-Transfer-Editing/tasks.py b/spaces/MaxReimann/Whitebox-Style-Transfer-Editing/tasks.py deleted file mode 100644 index a3b5095ac0824efe9cb0b970e27b3f813748a1d9..0000000000000000000000000000000000000000 --- a/spaces/MaxReimann/Whitebox-Style-Transfer-Editing/tasks.py +++ /dev/null @@ -1,143 +0,0 @@ -import base64 -import datetime -import os -import sys -from io import BytesIO -from pathlib import Path -import numpy as np -import requests -import torch -import torch.nn.functional as F -from PIL import Image -import time -import streamlit as st -from demo_config import HUGGING_FACE, WORKER_URL - - - -PACKAGE_PARENT = 'wise' -SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))) -sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT))) - -from parameter_optimization.parametric_styletransfer import single_optimize -from parameter_optimization.parametric_styletransfer import CONFIG as ST_CONFIG -from parameter_optimization.strotss_org import strotss, pil_resize_long_edge_to -from helpers import torch_to_np, np_to_torch - -def retrieve_for_results_from_server(): - task_id = st.session_state['current_server_task_id'] - vp_res = requests.get(WORKER_URL+"/get_vp", params={"task_id": task_id}) - image_res = requests.get(WORKER_URL+"/get_image", params={"task_id": task_id}) - if vp_res.status_code != 200 or image_res.status_code != 200: - st.warning("got status for " + WORKER_URL+"/get_vp" + str(vp_res.status_code)) - st.warning("got status for " + WORKER_URL+"/image_res" + str(image_res.status_code)) - st.session_state['current_server_task_id'] = None - vp_res.raise_for_status() - image_res.raise_for_status() - else: - st.session_state['current_server_task_id'] = None - vp = np.load(BytesIO(vp_res.content))["vp"] - print("received vp from server") - print("got numpy array", vp.shape) - vp = torch.from_numpy(vp).cuda() - image = Image.open(BytesIO(image_res.content)) - print("received image from server") - image = np_to_torch(np.asarray(image)).cuda() - - st.session_state["effect_input"] = image - st.session_state["result_vp"] = vp - - -def monitor_task(progress_placeholder): - task_id = st.session_state['current_server_task_id'] - - started_time = time.time() - retries = 3 - with progress_placeholder.container(): - st.warning("Do not interact with the app until results are shown - otherwise results might be lost.") - progress_bar = st.empty() - while True: - status = requests.get(WORKER_URL+"/get_status", params={"task_id": task_id}) - if 
status.status_code != 200: - print("get_status got status_code", status.status_code) - st.warning(status.content) - retries -= 1 - if retries == 0: - return - else: - time.sleep(2) - continue - status = status.json() - print(status) - if status["status"] != "running" and status["status"] != "queued" : - if status["msg"] != "": - print("got error for task", task_id, ":", status["msg"]) - progress_placeholder.error(status["msg"]) - st.session_state['current_server_task_id'] = None - st.stop() - if status["status"] == "finished": - retrieve_for_results_from_server() - return - elif status["status"] == "queued": - started_time = time.time() - queue_length = requests.get(WORKER_URL+"/queue_length").json() - progress_bar.write(f"There are {queue_length['length']} tasks in the queue") - elif status["progress"] == 0.0: - progressed = min(0.5 * (time.time() - started_time) / 80.0, 0.5) #estimate 80s for strotts - progress_bar.progress(progressed) - else: - progress_bar.progress(min(0.5 + status["progress"] / 2.0, 1.0)) - - time.sleep(2) - -def get_queue_length(): - queue_length = requests.get(WORKER_URL+"/queue_length").json() - return queue_length['length'] - - -def optimize_on_server(content, style, result_image_placeholder): - content_path=f"/tmp/content-wise-uploaded{str(datetime.datetime.timestamp(datetime.datetime.now()))}.jpg" - style_path=f"/tmp/content-wise-uploaded{str(datetime.datetime.timestamp(datetime.datetime.now()))}.jpg" - asp_c, asp_s = content.height / content.width, style.height / style.width - if any(a < 0.5 or a > 2.0 for a in (asp_c, asp_s)): - result_image_placeholder.error('aspect ratio must be <= 2') - st.stop() - - content = pil_resize_long_edge_to(content, 1024) - content.save(content_path) - style = pil_resize_long_edge_to(style, 1024) - style.save(style_path) - files = {'style-image': open(style_path, "rb"), "content-image": open(content_path, "rb")} - print("start-optimizing. 
Time: ", datetime.datetime.now()) - url = WORKER_URL + "/upload" - task_id_res = requests.post(url, files=files) - if task_id_res.status_code != 200: - result_image_placeholder.error(task_id_res.content) - st.stop() - else: - task_id = task_id_res.json()['task_id'] - st.session_state['current_server_task_id'] = task_id - - monitor_task(result_image_placeholder) - -def optimize_params(effect, preset, content, style, result_image_placeholder): - result_image_placeholder.text("Executing NST to create reference image..") - base_dir = f"result/{datetime.datetime.now().strftime(r'%Y-%m-%d %H.%Mh %Ss')}" - os.makedirs(base_dir) - reference = strotss(pil_resize_long_edge_to(content, 1024), - pil_resize_long_edge_to(style, 1024), content_weight=16.0, - device=torch.device("cuda"), space="uniform") - progress_bar = result_image_placeholder.progress(0.0) - ref_save_path = os.path.join(base_dir, "reference.jpg") - content_save_path = os.path.join(base_dir, "content.jpg") - resize_to = 720 - reference = pil_resize_long_edge_to(reference, resize_to) - reference.save(ref_save_path) - content.save(content_save_path) - ST_CONFIG["n_iterations"] = 300 - - vp, content_img_cuda = single_optimize(effect, preset, "l1", content_save_path, str(ref_save_path), - write_video=False, base_dir=base_dir, - iter_callback=lambda i: progress_bar.progress( - float(i) / ST_CONFIG["n_iterations"])) - st.session_state["effect_input"], st.session_state["result_vp"] = content_img_cuda.detach(), vp.cuda().detach() diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/closure.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/closure.py deleted file mode 100644 index b955f81f425be4ac3e6bb3f4aac653887989e872..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/closure.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .hook import HOOKS, Hook - - -@HOOKS.register_module() -class ClosureHook(Hook): - - def __init__(self, fn_name, fn): - assert hasattr(self, fn_name) - assert callable(fn) - setattr(self, fn_name, fn) diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/models/diffusion/dpm_solver/sampler.py b/spaces/Mellow-ai/PhotoAI_Mellow/ldm/models/diffusion/dpm_solver/sampler.py deleted file mode 100644 index 7d137b8cf36718c1c58faa09f9dd919e5fb2977b..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/models/diffusion/dpm_solver/sampler.py +++ /dev/null @@ -1,87 +0,0 @@ -"""SAMPLING ONLY.""" -import torch - -from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver - - -MODEL_TYPES = { - "eps": "noise", - "v": "v" -} - - -class DPMSolverSampler(object): - def __init__(self, model, **kwargs): - super().__init__() - self.model = model - to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) - self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - - print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') - - device = self.model.betas.device - if x_T is None: - img = torch.randn(size, device=device) - else: - img = x_T - - ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) - - model_fn = model_wrapper( - lambda x, t, c: self.model.apply_model(x, t, c), - ns, - model_type=MODEL_TYPES[self.model.parameterization], - guidance_type="classifier-free", - condition=conditioning, - unconditional_condition=unconditional_conditioning, - guidance_scale=unconditional_guidance_scale, - ) - - dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) - x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) - - return x.to(device), None \ No newline at end of file diff --git a/spaces/MihaiPopa2/ChatGPT-Prompt-Generator/app.py b/spaces/MihaiPopa2/ChatGPT-Prompt-Generator/app.py deleted file mode 100644 index 90e8befa2beb7d8847c359922a901604c582773d..0000000000000000000000000000000000000000 --- a/spaces/MihaiPopa2/ChatGPT-Prompt-Generator/app.py +++ /dev/null @@ -1,19 +0,0 @@ -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM -import gradio as gr - -tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompts-bart-long") -model = 
AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompts-bart-long", from_tf=True) - -def generate(prompt): - - batch = tokenizer(prompt, return_tensors="pt") - generated_ids = model.generate(batch["input_ids"], max_new_tokens=150) - output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) - return output[0] - -input_component = gr.Textbox(label = "Input a persona, e.g. photographer", value = "photographer") -output_component = gr.Textbox(label = "Prompt") -examples = [["photographer"], ["developer"]] -description = "This app generates ChatGPT prompts, it's based on a BART model trained on [this dataset](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts). 📓 Simply enter a persona that you want the prompt to be generated based on. 🧙🏻🧑🏻‍🚀🧑🏻‍🎨🧑🏻‍🔬🧑🏻‍💻🧑🏼‍🏫🧑🏽‍🌾" -gr.Interface(generate, inputs = input_component, outputs=output_component, examples=examples, title = "👨🏻‍🎤 ChatGPT Prompt Generator 👨🏻‍🎤", description=description).launch() - diff --git a/spaces/MirageML/sjc/misc.py b/spaces/MirageML/sjc/misc.py deleted file mode 100644 index d6675b9e984c6cea13b15ef1eb53ca308f4c2464..0000000000000000000000000000000000000000 --- a/spaces/MirageML/sjc/misc.py +++ /dev/null @@ -1,53 +0,0 @@ -import numpy as np -import torch - - -def torch_samps_to_imgs(imgs, uncenter=True): - if uncenter: - imgs = (imgs + 1) / 2 # [-1, 1] -> [0, 1] - imgs = (imgs * 255).clamp(0, 255) - imgs = imgs.to(torch.uint8) - imgs = imgs.permute(0, 2, 3, 1) - imgs = imgs.cpu().numpy() - return imgs - - -def imgs_to_torch(imgs): - assert imgs.dtype == np.uint8 - assert len(imgs.shape) == 4 and imgs.shape[-1] == 3, "expect (N, H, W, C)" - _, H, W, _ = imgs.shape - - imgs = imgs.transpose(0, 3, 1, 2) - imgs = (imgs / 255).astype(np.float32) - imgs = (imgs * 2) - 1 - imgs = torch.as_tensor(imgs) - H, W = [_l - (_l % 32) for _l in (H, W)] - imgs = torch.nn.functional.interpolate(imgs, (H, W), mode="bilinear") - return imgs - - -def test_encode_decode(): - import imageio - from run_img_sampling import ScoreAdapter, SD - from vis import _draw - - fname = "~/clean.png" - raw = imageio.imread(fname) - raw = imgs_to_torch(raw[np.newaxis, ...]) - - model: ScoreAdapter = SD().run() - raw = raw.to(model.device) - zs = model.encode(raw) - img = model.decode(zs) - img = torch_samps_to_imgs(img) - _draw( - [imageio.imread(fname), img.squeeze(0)], - ) - - -def test(): - test_encode_decode() - - -if __name__ == "__main__": - test() diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/svtr/svtr-small_20e_st_mj.py b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/svtr/svtr-small_20e_st_mj.py deleted file mode 100644 index bd73e46f82b5fae242386f7ea1780ba48991fada..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/svtr/svtr-small_20e_st_mj.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - 'svtr-tiny_20e_st_mj.py', -] - -model = dict( - encoder=dict( - embed_dims=[96, 192, 256], - depth=[3, 6, 6], - num_heads=[3, 6, 8], - mixer_types=['Local'] * 8 + ['Global'] * 7)) diff --git a/spaces/NATSpeech/PortaSpeech/modules/tts/portaspeech/portaspeech_flow.py b/spaces/NATSpeech/PortaSpeech/modules/tts/portaspeech/portaspeech_flow.py deleted file mode 100644 index 256887dd8b365e38ac6c1973f4ec376e93029652..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/PortaSpeech/modules/tts/portaspeech/portaspeech_flow.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch -import torch.distributions as dist -from torch import nn -from 
modules.commons.normalizing_flow.glow_modules import Glow -from modules.tts.portaspeech.portaspeech import PortaSpeech - - -class PortaSpeechFlow(PortaSpeech): - def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None): - super().__init__(ph_dict_size, word_dict_size, hparams, out_dims) - cond_hs = 80 - if hparams.get('use_txt_cond', True): - cond_hs = cond_hs + hparams['hidden_size'] - if hparams.get('use_latent_cond', False): - cond_hs = cond_hs + hparams['latent_size'] - if hparams['use_cond_proj']: - self.g_proj = nn.Conv1d(cond_hs, 160, 5, padding=2) - cond_hs = 160 - self.post_flow = Glow( - 80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1, - hparams['post_glow_n_blocks'], hparams['post_glow_n_block_layers'], - n_split=4, n_sqz=2, - gin_channels=cond_hs, - share_cond_layers=hparams['post_share_cond_layers'], - share_wn_layers=hparams['share_wn_layers'], - sigmoid_scale=hparams['sigmoid_scale'] - ) - self.prior_dist = dist.Normal(0, 1) - - def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, - spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, - forward_post_glow=True, two_stage=True, global_step=None): - is_training = self.training - train_fvae = not (forward_post_glow and two_stage) - if not train_fvae: - self.eval() - with torch.set_grad_enabled(mode=train_fvae): - ret = super(PortaSpeechFlow, self).forward( - txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, - spk_embed, spk_id, pitch, infer, tgt_mels, global_step) - if (forward_post_glow or not two_stage) and self.hparams['use_post_flow']: - self.run_post_glow(tgt_mels, infer, is_training, ret) - return ret - - def run_post_glow(self, tgt_mels, infer, is_training, ret): - x_recon = ret['mel_out'].transpose(1, 2) - g = x_recon - B, _, T = g.shape - if self.hparams.get('use_txt_cond', True): - g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1) - if self.hparams.get('use_latent_cond', False): - g_z = ret['z_p'][:, :, :, None].repeat(1, 1, 1, 4).reshape(B, -1, T) - g = torch.cat([g, g_z], 1) - if self.hparams['use_cond_proj']: - g = self.g_proj(g) - prior_dist = self.prior_dist - if not infer: - if is_training: - self.post_flow.train() - nonpadding = ret['nonpadding'].transpose(1, 2) - y_lengths = nonpadding.sum(-1) - if self.hparams['detach_postflow_input']: - g = g.detach() - tgt_mels = tgt_mels.transpose(1, 2) - z_postflow, ldj = self.post_flow(tgt_mels, nonpadding, g=g) - ldj = ldj / y_lengths / 80 - ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj - ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean() - if torch.isnan(ret['postflow']): - ret['postflow'] = None - else: - nonpadding = torch.ones_like(x_recon[:, :1, :]) - z_post = torch.randn(x_recon.shape).to(g.device) * self.hparams['noise_scale'] - x_recon, _ = self.post_flow(z_post, nonpadding, g, reverse=True) - ret['mel_out'] = x_recon.transpose(1, 2) diff --git a/spaces/Nahidabyer/img-to-music/style.css b/spaces/Nahidabyer/img-to-music/style.css deleted file mode 100644 index 8f7397fe7f0971636015170df075cd2d070344ec..0000000000000000000000000000000000000000 --- a/spaces/Nahidabyer/img-to-music/style.css +++ /dev/null @@ -1,51 +0,0 @@ -#col-container {max-width: 510px; margin-left: auto; margin-right: auto;} -a {text-decoration-line: underline; font-weight: 600;} -div#music-output .h-full { - min-height: 5rem; -} -.footer { - margin-bottom: 45px; - margin-top: 10px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: 
.8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} \ No newline at end of file diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/fconv_lm.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/fconv_lm.py deleted file mode 100644 index 4b243d6669cb57880353b45a01843ec22010fb5f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/fconv_lm.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq import utils -from fairseq.models import ( - FairseqLanguageModel, - register_model, - register_model_architecture, -) -from fairseq.models.fconv import FConvDecoder -from fairseq.utils import safe_hasattr - - -@register_model("fconv_lm") -class FConvLanguageModel(FairseqLanguageModel): - def __init__(self, decoder): - super().__init__(decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - parser.add_argument( - "--dropout", type=float, metavar="D", help="dropout probability" - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension", - ) - parser.add_argument( - "--decoder-layers", - type=str, - metavar="EXPR", - help="decoder layers [(dim, kernel_size), ...]", - ) - parser.add_argument( - "--decoder-out-embed-dim", - type=int, - metavar="N", - help="decoder output embedding dimension", - ) - parser.add_argument( - "--adaptive-softmax-cutoff", - metavar="EXPR", - help="comma separated list of adaptive softmax cutoff points. 
" - "Must be used with adaptive_loss criterion", - ) - parser.add_argument( - "--adaptive-softmax-dropout", - type=float, - metavar="D", - help="sets adaptive softmax dropout for the tail projections", - ) - parser.add_argument( - "--decoder-attention", - type=str, - metavar="EXPR", - help="decoder attention [True, ...]", - ) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure all arguments are present in older models - base_lm_architecture(args) - - if safe_hasattr(args, "max_target_positions") and not safe_hasattr( - args, "tokens_per_sample" - ): - args.tokens_per_sample = args.max_target_positions - - decoder = FConvDecoder( - dictionary=task.target_dictionary, - embed_dim=args.decoder_embed_dim, - convolutions=eval(args.decoder_layers), - out_embed_dim=args.decoder_embed_dim, - attention=eval(args.decoder_attention), - dropout=args.dropout, - max_positions=args.tokens_per_sample, - share_embed=False, - positional_embeddings=False, - adaptive_softmax_cutoff=( - utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) - if args.criterion == "adaptive_loss" - else None - ), - adaptive_softmax_dropout=args.adaptive_softmax_dropout, - ) - return FConvLanguageModel(decoder) - - -@register_model_architecture("fconv_lm", "fconv_lm") -def base_lm_architecture(args): - args.dropout = getattr(args, "dropout", 0.1) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) - args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13") - args.decoder_attention = getattr(args, "decoder_attention", "False") - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - - -@register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103") -def fconv_lm_dauphin_wikitext103(args): - layers = "[(850, 6)] * 3" - layers += " + [(850, 1)] * 1" - layers += " + [(850, 5)] * 4" - layers += " + [(850, 1)] * 1" - layers += " + [(850, 4)] * 3" - layers += " + [(1024, 4)] * 1" - layers += " + [(2048, 4)] * 1" - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280) - args.decoder_layers = getattr(args, "decoder_layers", layers) - args.decoder_attention = getattr(args, "decoder_attention", "False") - args.adaptive_softmax_cutoff = getattr( - args, "adaptive_softmax_cutoff", "10000,20000,200000" - ) - base_lm_architecture(args) - - -@register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw") -def fconv_lm_dauphin_gbw(args): - layers = "[(512, 5)]" - layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3" - layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3" - layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6" - layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]" - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) - args.decoder_layers = getattr(args, "decoder_layers", layers) - args.decoder_attention = getattr(args, "decoder_attention", "False") - args.adaptive_softmax_cutoff = getattr( - args, "adaptive_softmax_cutoff", "10000,50000,200000" - ) - base_lm_architecture(args) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/README.custom_classification.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/README.custom_classification.md deleted file mode 100644 index 7254bb7d178760ef5b847901bbcac3711af33ca2..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/README.custom_classification.md +++ 
/dev/null @@ -1,168 +0,0 @@ -# Finetuning RoBERTa on a custom classification task - -This example shows how to finetune RoBERTa on the IMDB dataset, but should illustrate the process for most classification tasks. - -### 1) Get the data - -```bash -wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz -tar zxvf aclImdb_v1.tar.gz -``` - - -### 2) Format data - -The `IMDB` data has one sample per file; the Python snippet below collects the samples into a single file each for the train and valid splits, for ease of processing. -```python -import argparse -import os -import random -from glob import glob - -random.seed(0) - -def main(args): - for split in ['train', 'test']: - samples = [] - for class_label in ['pos', 'neg']: - fnames = glob(os.path.join(args.datadir, split, class_label) + '/*.txt') - for fname in fnames: - with open(fname) as fin: - line = fin.readline() - samples.append((line, 1 if class_label == 'pos' else 0)) - random.shuffle(samples) - out_fname = 'train' if split == 'train' else 'dev' - f1 = open(os.path.join(args.datadir, out_fname + '.input0'), 'w') - f2 = open(os.path.join(args.datadir, out_fname + '.label'), 'w') - for sample in samples: - f1.write(sample[0] + '\n') - f2.write(str(sample[1]) + '\n') - f1.close() - f2.close() - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--datadir', default='aclImdb') - args = parser.parse_args() - main(args) -``` - - -### 3) BPE encode - -Run `multiprocessing_bpe_encoder`; you could also do this in the previous step for each sample, but that might be slower. -```bash -# Download encoder.json and vocab.bpe -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' - -for SPLIT in train dev; do - python -m examples.roberta.multiprocessing_bpe_encoder \ - --encoder-json encoder.json \ - --vocab-bpe vocab.bpe \ - --inputs "aclImdb/$SPLIT.input0" \ - --outputs "aclImdb/$SPLIT.input0.bpe" \ - --workers 60 \ - --keep-empty -done -``` - - -### 4) Preprocess data - -```bash -# Download fairseq dictionary. -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt' - -fairseq-preprocess \ - --only-source \ - --trainpref "aclImdb/train.input0.bpe" \ - --validpref "aclImdb/dev.input0.bpe" \ - --destdir "IMDB-bin/input0" \ - --workers 60 \ - --srcdict dict.txt - -fairseq-preprocess \ - --only-source \ - --trainpref "aclImdb/train.label" \ - --validpref "aclImdb/dev.label" \ - --destdir "IMDB-bin/label" \ - --workers 60 - -``` - - -### 5) Run training - -```bash -TOTAL_NUM_UPDATES=7812 # 10 epochs through IMDB for bsz 32 -WARMUP_UPDATES=469 # 6 percent of the number of updates -LR=1e-05 # Peak LR for polynomial LR scheduler. -HEAD_NAME=imdb_head # Custom name for the classification head. -NUM_CLASSES=2 # Number of classes for the classification task. -MAX_SENTENCES=8 # Batch size. 
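-# Note: the effective batch size is MAX_SENTENCES * update-freq = 8 * 4 = 32 sentences.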
-ROBERTA_PATH=/path/to/roberta.large/model.pt - -CUDA_VISIBLE_DEVICES=0 fairseq-train IMDB-bin/ \ - --restore-file $ROBERTA_PATH \ - --max-positions 512 \ - --batch-size $MAX_SENTENCES \ - --max-tokens 4400 \ - --task sentence_prediction \ - --reset-optimizer --reset-dataloader --reset-meters \ - --required-batch-size-multiple 1 \ - --init-token 0 --separator-token 2 \ - --arch roberta_large \ - --criterion sentence_prediction \ - --classification-head-name $HEAD_NAME \ - --num-classes $NUM_CLASSES \ - --dropout 0.1 --attention-dropout 0.1 \ - --weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \ - --clip-norm 0.0 \ - --lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \ - --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \ - --max-epoch 10 \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --shorten-method "truncate" \ - --find-unused-parameters \ - --update-freq 4 -``` - -The above command will finetune RoBERTa-large with an effective batch-size of 32 -sentences (`--batch-size=8 --update-freq=4`). The expected -`best-validation-accuracy` after 10 epochs is ~96.5%. - -If you run out of GPU memory, try decreasing `--batch-size` and increase -`--update-freq` to compensate. - - -### 6) Load model using hub interface - -Now we can load the trained model checkpoint using the RoBERTa hub interface. - -Assuming your checkpoints are stored in `checkpoints/`: -```python -from fairseq.models.roberta import RobertaModel -roberta = RobertaModel.from_pretrained( - 'checkpoints', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='IMDB-bin' -) -roberta.eval() # disable dropout -``` - -Finally you can make predictions using the `imdb_head` (or whatever you set -`--classification-head-name` to during training): -```python -label_fn = lambda label: roberta.task.label_dictionary.string( - [label + roberta.task.label_dictionary.nspecial] -) - -tokens = roberta.encode('Best movie this year') -pred = label_fn(roberta.predict('imdb_head', tokens).argmax().item()) -assert pred == '1' # positive - -tokens = roberta.encode('Worst movie ever') -pred = label_fn(roberta.predict('imdb_head', tokens).argmax().item()) -assert pred == '0' # negative -``` diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py deleted file mode 100644 index 216093f7087a61060767babf5a3f3f4e716a4dfe..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import json -import os - -import numpy as np -import torch -from fairseq.data import ( - Dictionary, - IdDataset, - ListDataset, - NestedDictionaryDataset, - NumelDataset, - NumSamplesDataset, - RawLabelDataset, - RightPadDataset, - SortDataset, - data_utils, - encoders, -) -from fairseq.tasks import LegacyFairseqTask, register_task - - -@register_task("commonsense_qa") -class CommonsenseQATask(LegacyFairseqTask): - """Task to finetune RoBERTa for Commonsense QA.""" - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument( - "data", metavar="DIR", help="path to data directory; we load .jsonl" - ) - parser.add_argument( - "--init-token", - type=int, - default=None, - help="add token at the beginning of each batch item", - ) - parser.add_argument("--num-classes", type=int, default=5) - - def __init__(self, args, vocab): - super().__init__(args) - self.vocab = vocab - self.mask = vocab.add_symbol("") - - self.bpe = encoders.build_bpe(args) - - @classmethod - def load_dictionary(cls, filename): - """Load the dictionary from the filename - - Args: - filename (str): the filename - """ - dictionary = Dictionary.load(filename) - dictionary.add_symbol("") - return dictionary - - @classmethod - def setup_task(cls, args, **kwargs): - assert ( - args.criterion == "sentence_ranking" - ), "Must set --criterion=sentence_ranking" - - # load data and label dictionaries - vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) - print("| dictionary: {} types".format(len(vocab))) - - return cls(args, vocab) - - def load_dataset( - self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs - ): - """Load a given dataset split. - - Args: - split (str): name of the split (e.g., train, valid, test) - """ - - def binarize(s, append_bos=False): - if self.bpe is not None: - s = self.bpe.encode(s) - tokens = self.vocab.encode_line( - s, - append_eos=True, - add_if_not_exist=False, - ).long() - if append_bos and self.args.init_token is not None: - tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) - return tokens - - if data_path is None: - data_path = os.path.join(self.args.data, split + ".jsonl") - if not os.path.exists(data_path): - raise FileNotFoundError("Cannot find data: {}".format(data_path)) - - src_tokens = [[] for i in range(self.args.num_classes)] - src_lengths = [[] for i in range(self.args.num_classes)] - labels = [] - - with open(data_path) as h: - for line in h: - example = json.loads(line.strip()) - if "answerKey" in example: - label = ord(example["answerKey"]) - ord("A") - labels.append(label) - question = example["question"]["stem"] - assert len(example["question"]["choices"]) == self.args.num_classes - # format: ` Q: Where would I not want a fox? 
A: hen house ` - question = "Q: " + question - question_toks = binarize(question, append_bos=True) - for i, choice in enumerate(example["question"]["choices"]): - src = "A: " + choice["text"] - src_bin = torch.cat([question_toks, binarize(src)]) - src_tokens[i].append(src_bin) - src_lengths[i].append(len(src_bin)) - assert all( - len(src_tokens[0]) == len(src_tokens[i]) - for i in range(self.args.num_classes) - ) - assert len(src_tokens[0]) == len(src_lengths[0]) - assert len(labels) == 0 or len(labels) == len(src_tokens[0]) - - for i in range(self.args.num_classes): - src_lengths[i] = np.array(src_lengths[i]) - src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i]) - src_lengths[i] = ListDataset(src_lengths[i]) - - dataset = { - "id": IdDataset(), - "nsentences": NumSamplesDataset(), - "ntokens": NumelDataset(src_tokens[0], reduce=True), - } - - for i in range(self.args.num_classes): - dataset.update( - { - "net_input{}".format(i + 1): { - "src_tokens": RightPadDataset( - src_tokens[i], - pad_idx=self.source_dictionary.pad(), - ), - "src_lengths": src_lengths[i], - } - } - ) - - if len(labels) > 0: - dataset.update({"target": RawLabelDataset(labels)}) - - dataset = NestedDictionaryDataset( - dataset, - sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])], - ) - - with data_utils.numpy_seed(self.args.seed): - dataset = SortDataset( - dataset, - # shuffle - sort_order=[np.random.permutation(len(dataset))], - ) - - print("| Loaded {} with {} samples".format(split, len(dataset))) - - self.datasets[split] = dataset - return self.datasets[split] - - def build_model(self, args): - from fairseq import models - - model = models.build_model(args, self) - - model.register_classification_head( - "sentence_classification_head", - num_classes=1, - ) - - return model - - @property - def source_dictionary(self): - return self.vocab - - @property - def target_dictionary(self): - return self.vocab diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/data/random_input_dataset.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/data/random_input_dataset.py deleted file mode 100644 index 886505616cc7f7a515ecebf34fae5c2bc541de03..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/data/random_input_dataset.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
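-# RandomInputDataset wraps a dataset and, on each access, replaces the field -# addressed by input_key_path with a random item drawn from -# random_input_dataset; its collater pads those random inputs with pad_idx.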
- -import random -from typing import List - -from fairseq.data import BaseWrapperDataset, data_utils - - -class RandomInputDataset(BaseWrapperDataset): - def __init__( - self, - dataset, - random_input_dataset, - input_key_path: List[str], - add_to_input, - pad_idx, - ): - super().__init__(dataset) - self.random_input_dataset = random_input_dataset - if isinstance(input_key_path, str): - input_key_path = [input_key_path] - assert len(input_key_path) > 0 - self.input_key_path = input_key_path - self.add_to_input = add_to_input - self.pad_idx = pad_idx - - def get_target(self, item): - target_loc = item - for p in self.input_key_path[:-1]: - target_loc = target_loc[p] - return self.input_key_path[-1], target_loc - - def get_target_value(self, item): - k, target_loc = self.get_target(item) - return target_loc[k] - - def __getitem__(self, index): - item = self.dataset[index] - k, target_loc = self.get_target(item) - target_loc[k] = random.choice(self.random_input_dataset) - return item - - def collater(self, samples): - collated = self.dataset.collater(samples) - if len(collated) == 0: - return collated - indices = set(collated["id"].tolist()) - - random_inputs = data_utils.collate_tokens( - [self.get_target_value(s) for s in samples if s["id"] in indices], - pad_idx=self.pad_idx, - left_pad=False, - ) - k, target_loc = self.get_target( - collated if not self.add_to_input else collated["net_input"] - ) - target_loc[k] = random_inputs - - return collated diff --git a/spaces/ORI-Muchim/RaidenTTS/monotonic_align/core.py b/spaces/ORI-Muchim/RaidenTTS/monotonic_align/core.py deleted file mode 100644 index 1f940605fe4fd0738fa0006149fcba14ef88223a..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/RaidenTTS/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/models/ade20k/segm_lib/utils/data/dataset.py b/spaces/OpenGVLab/InternGPT/third-party/lama/models/ade20k/segm_lib/utils/data/dataset.py deleted file mode 100644 index 605aa877f7031a5cd2b98c0f831410aa80fddefa..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/models/ade20k/segm_lib/utils/data/dataset.py +++ /dev/null @@ -1,118 +0,0 @@ -import bisect -import warnings - -from torch._utils import _accumulate -from torch import randperm - - -class Dataset(object): - """An abstract class representing a Dataset. - - All other datasets should subclass it. All subclasses should override - ``__len__``, that provides the size of the dataset, and ``__getitem__``, - supporting integer indexing in range from 0 to len(self) exclusive. 
- """ - - def __getitem__(self, index): - raise NotImplementedError - - def __len__(self): - raise NotImplementedError - - def __add__(self, other): - return ConcatDataset([self, other]) - - -class TensorDataset(Dataset): - """Dataset wrapping data and target tensors. - - Each sample will be retrieved by indexing both tensors along the first - dimension. - - Arguments: - data_tensor (Tensor): contains sample data. - target_tensor (Tensor): contains sample targets (labels). - """ - - def __init__(self, data_tensor, target_tensor): - assert data_tensor.size(0) == target_tensor.size(0) - self.data_tensor = data_tensor - self.target_tensor = target_tensor - - def __getitem__(self, index): - return self.data_tensor[index], self.target_tensor[index] - - def __len__(self): - return self.data_tensor.size(0) - - -class ConcatDataset(Dataset): - """ - Dataset to concatenate multiple datasets. - Purpose: useful to assemble different existing datasets, possibly - large-scale datasets as the concatenation operation is done in an - on-the-fly manner. - - Arguments: - datasets (iterable): List of datasets to be concatenated - """ - - @staticmethod - def cumsum(sequence): - r, s = [], 0 - for e in sequence: - l = len(e) - r.append(l + s) - s += l - return r - - def __init__(self, datasets): - super(ConcatDataset, self).__init__() - assert len(datasets) > 0, 'datasets should not be an empty iterable' - self.datasets = list(datasets) - self.cumulative_sizes = self.cumsum(self.datasets) - - def __len__(self): - return self.cumulative_sizes[-1] - - def __getitem__(self, idx): - dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) - if dataset_idx == 0: - sample_idx = idx - else: - sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] - return self.datasets[dataset_idx][sample_idx] - - @property - def cummulative_sizes(self): - warnings.warn("cummulative_sizes attribute is renamed to " - "cumulative_sizes", DeprecationWarning, stacklevel=2) - return self.cumulative_sizes - - -class Subset(Dataset): - def __init__(self, dataset, indices): - self.dataset = dataset - self.indices = indices - - def __getitem__(self, idx): - return self.dataset[self.indices[idx]] - - def __len__(self): - return len(self.indices) - - -def random_split(dataset, lengths): - """ - Randomly split a dataset into non-overlapping new datasets of given lengths - ds - - Arguments: - dataset (Dataset): Dataset to be split - lengths (iterable): lengths of splits to be produced - """ - if sum(lengths) != len(dataset): - raise ValueError("Sum of input lengths does not equal the length of the input dataset!") - - indices = randperm(sum(lengths)) - return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)] diff --git a/spaces/PKUWilliamYang/StyleGANEX/datasets/inference_dataset.py b/spaces/PKUWilliamYang/StyleGANEX/datasets/inference_dataset.py deleted file mode 100644 index de457349b0726932176f21814c61e34f15955bb7..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/StyleGANEX/datasets/inference_dataset.py +++ /dev/null @@ -1,22 +0,0 @@ -from torch.utils.data import Dataset -from PIL import Image -from utils import data_utils - - -class InferenceDataset(Dataset): - - def __init__(self, root, opts, transform=None): - self.paths = sorted(data_utils.make_dataset(root)) - self.transform = transform - self.opts = opts - - def __len__(self): - return len(self.paths) - - def __getitem__(self, index): - from_path = self.paths[index] - from_im = Image.open(from_path) - 
from_im = from_im.convert('RGB') if self.opts.label_nc == 0 else from_im.convert('L') - if self.transform: - from_im = self.transform(from_im) - return from_im diff --git a/spaces/PSLD/PSLD/stable-diffusion/run/inverse_bip_ldm_laion.sh b/spaces/PSLD/PSLD/stable-diffusion/run/inverse_bip_ldm_laion.sh deleted file mode 100644 index 371a681f0ddc1fae024bfd433192c832d440ae81..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/stable-diffusion/run/inverse_bip_ldm_laion.sh +++ /dev/null @@ -1,13 +0,0 @@ -export CUDA_VISIBLE_DEVICES='1' -python scripts/inverse.py \ - --file_id='00478.png' \ - --task_config='configs/box_inpainting_config.yaml' \ - --inpainting=1 \ - --general_inverse=0 \ - --gamma=1e-2 \ - --omega=1e-1 \ - --W=256 \ - --H=256 \ - --scale=5.0 \ - --laion400m \ - --outdir="outputs/psld-ldm-laion400m-bip" diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/format.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/format.go deleted file mode 100644 index aec20c87dd94757b7872ec546c99e22b87773805..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/format.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/threads.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/threads.go deleted file mode 100644 index e0a579cbf5d836d32bab287d9428025c9f26e572..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/threads.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/bytecode/spec.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/bytecode/spec.go deleted file mode 100644 index 0c254f5632e39dd989bfa6e537565b27e4315a64..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/bytecode/spec.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/Deci-DeciCoder-1b/README.md b/spaces/PeepDaSlan9/Deci-DeciCoder-1b/README.md deleted file mode 100644 index cc0c6b5e5edd40ff82f409035ebd4c2dbe5b1ae3..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/Deci-DeciCoder-1b/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Deci DeciCoder 1b -emoji: 📚 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/PeepDaSlan9/ToyWorld/index.html b/spaces/PeepDaSlan9/ToyWorld/index.html deleted file mode 100644 index 6250c2958a7186a4e64f21c02b0359ff5ecd7e97..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/ToyWorld/index.html +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/PeepDaSlan9/nitrosocke-mo-di-diffusion/app.py b/spaces/PeepDaSlan9/nitrosocke-mo-di-diffusion/app.py deleted file mode 100644 index fbe1e2c00bab513e1d932ffb89ce2b2bc45c27eb..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/nitrosocke-mo-di-diffusion/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/nitrosocke/mo-di-diffusion").launch() \ No newline at end of file diff --git 
a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/image/geometric.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/image/geometric.py deleted file mode 100644 index cf97c201cb4e43796c911919d03fb26a07ed817d..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/image/geometric.py +++ /dev/null @@ -1,728 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import numbers - -import cv2 -import numpy as np - -from ..utils import to_2tuple -from .io import imread_backend - -try: - from PIL import Image -except ImportError: - Image = None - - -def _scale_size(size, scale): - """Rescale a size by a ratio. - - Args: - size (tuple[int]): (w, h). - scale (float | tuple(float)): Scaling factor. - - Returns: - tuple[int]: scaled size. - """ - if isinstance(scale, (float, int)): - scale = (scale, scale) - w, h = size - return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5) - - -cv2_interp_codes = { - 'nearest': cv2.INTER_NEAREST, - 'bilinear': cv2.INTER_LINEAR, - 'bicubic': cv2.INTER_CUBIC, - 'area': cv2.INTER_AREA, - 'lanczos': cv2.INTER_LANCZOS4 -} - -if Image is not None: - pillow_interp_codes = { - 'nearest': Image.NEAREST, - 'bilinear': Image.BILINEAR, - 'bicubic': Image.BICUBIC, - 'box': Image.BOX, - 'lanczos': Image.LANCZOS, - 'hamming': Image.HAMMING - } - - -def imresize(img, - size, - return_scale=False, - interpolation='bilinear', - out=None, - backend=None): - """Resize image to a given size. - - Args: - img (ndarray): The input image. - size (tuple[int]): Target size (w, h). - return_scale (bool): Whether to return `w_scale` and `h_scale`. - interpolation (str): Interpolation method, accepted values are - "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' - backend, "nearest", "bilinear" for 'pillow' backend. - out (ndarray): The output destination. - backend (str | None): The image resize backend type. Options are `cv2`, - `pillow`, `None`. If backend is None, the global imread_backend - specified by ``mmcv.use_backend()`` will be used. Default: None. - - Returns: - tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or - `resized_img`. - """ - h, w = img.shape[:2] - if backend is None: - backend = imread_backend - if backend not in ['cv2', 'pillow']: - raise ValueError(f'backend: {backend} is not supported for resize.' - f"Supported backends are 'cv2', 'pillow'") - - if backend == 'pillow': - assert img.dtype == np.uint8, 'Pillow backend only supports uint8 type' - pil_image = Image.fromarray(img) - pil_image = pil_image.resize(size, pillow_interp_codes[interpolation]) - resized_img = np.array(pil_image) - else: - resized_img = cv2.resize( - img, size, dst=out, interpolation=cv2_interp_codes[interpolation]) - if not return_scale: - return resized_img - else: - w_scale = size[0] / w - h_scale = size[1] / h - return resized_img, w_scale, h_scale - - -def imresize_to_multiple(img, - divisor, - size=None, - scale_factor=None, - keep_ratio=False, - return_scale=False, - interpolation='bilinear', - out=None, - backend=None): - """Resize image according to a given size or scale factor and then round - up the resized or rescaled image size to the nearest value that can be - divided by the divisor. - - Args: - img (ndarray): The input image. - divisor (int | tuple): Resized image size will be a multiple of - divisor. If divisor is a tuple, divisor should be - (w_divisor, h_divisor). - size (None | int | tuple[int]): Target size (w, h). Default: None.
- scale_factor (None | float | tuple[float]): Multiplier for spatial - size. Should match input size if it is a tuple and the 2D style is - (w_scale_factor, h_scale_factor). Default: None. - keep_ratio (bool): Whether to keep the aspect ratio when resizing the - image. Default: False. - return_scale (bool): Whether to return `w_scale` and `h_scale`. - interpolation (str): Interpolation method, accepted values are - "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' - backend, "nearest", "bilinear" for 'pillow' backend. - out (ndarray): The output destination. - backend (str | None): The image resize backend type. Options are `cv2`, - `pillow`, `None`. If backend is None, the global imread_backend - specified by ``mmcv.use_backend()`` will be used. Default: None. - - Returns: - tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or - `resized_img`. - """ - h, w = img.shape[:2] - if size is not None and scale_factor is not None: - raise ValueError('only one of size or scale_factor should be defined') - elif size is None and scale_factor is None: - raise ValueError('one of size or scale_factor should be defined') - elif size is not None: - size = to_2tuple(size) - if keep_ratio: - size = rescale_size((w, h), size, return_scale=False) - else: - size = _scale_size((w, h), scale_factor) - - divisor = to_2tuple(divisor) - size = tuple([int(np.ceil(s / d)) * d for s, d in zip(size, divisor)]) - resized_img, w_scale, h_scale = imresize( - img, - size, - return_scale=True, - interpolation=interpolation, - out=out, - backend=backend) - if return_scale: - return resized_img, w_scale, h_scale - else: - return resized_img - - -def imresize_like(img, - dst_img, - return_scale=False, - interpolation='bilinear', - backend=None): - """Resize image to the same size of a given image. - - Args: - img (ndarray): The input image. - dst_img (ndarray): The target image. - return_scale (bool): Whether to return `w_scale` and `h_scale`. - interpolation (str): Same as :func:`resize`. - backend (str | None): Same as :func:`resize`. - - Returns: - tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or - `resized_img`. - """ - h, w = dst_img.shape[:2] - return imresize(img, (w, h), return_scale, interpolation, backend=backend) - - -def rescale_size(old_size, scale, return_scale=False): - """Calculate the new size to be rescaled to. - - Args: - old_size (tuple[int]): The old size (w, h) of image. - scale (float | tuple[int]): The scaling factor or maximum size. - If it is a float number, then the image will be rescaled by this - factor, else if it is a tuple of 2 integers, then the image will - be rescaled as large as possible within the scale. - return_scale (bool): Whether to return the scaling factor besides the - rescaled image size. - - Returns: - tuple[int]: The new rescaled image size. - """ - w, h = old_size - if isinstance(scale, (float, int)): - if scale <= 0: - raise ValueError(f'Invalid scale {scale}, must be positive.') - scale_factor = scale - elif isinstance(scale, tuple): - max_long_edge = max(scale) - max_short_edge = min(scale) - scale_factor = min(max_long_edge / max(h, w), - max_short_edge / min(h, w)) - else: - raise TypeError( - f'Scale must be a number or tuple of int, but got {type(scale)}') - - new_size = _scale_size((w, h), scale_factor) - - if return_scale: - return new_size, scale_factor - else: - return new_size - - -def imrescale(img, - scale, - return_scale=False, - interpolation='bilinear', - backend=None): - """Resize image while keeping the aspect ratio. 
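- The scale may be a float factor or a (max_long_edge, max_short_edge) - tuple; the effective factor is computed by :func:`rescale_size`.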
- - Args: - img (ndarray): The input image. - scale (float | tuple[int]): The scaling factor or maximum size. - If it is a float number, then the image will be rescaled by this - factor, else if it is a tuple of 2 integers, then the image will - be rescaled as large as possible within the scale. - return_scale (bool): Whether to return the scaling factor besides the - rescaled image. - interpolation (str): Same as :func:`resize`. - backend (str | None): Same as :func:`resize`. - - Returns: - ndarray: The rescaled image. - """ - h, w = img.shape[:2] - new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) - rescaled_img = imresize( - img, new_size, interpolation=interpolation, backend=backend) - if return_scale: - return rescaled_img, scale_factor - else: - return rescaled_img - - -def imflip(img, direction='horizontal'): - """Flip an image horizontally or vertically. - - Args: - img (ndarray): Image to be flipped. - direction (str): The flip direction, either "horizontal" or - "vertical" or "diagonal". - - Returns: - ndarray: The flipped image. - """ - assert direction in ['horizontal', 'vertical', 'diagonal'] - if direction == 'horizontal': - return np.flip(img, axis=1) - elif direction == 'vertical': - return np.flip(img, axis=0) - else: - return np.flip(img, axis=(0, 1)) - - -def imflip_(img, direction='horizontal'): - """Inplace flip an image horizontally or vertically. - - Args: - img (ndarray): Image to be flipped. - direction (str): The flip direction, either "horizontal" or - "vertical" or "diagonal". - - Returns: - ndarray: The flipped image (inplace). - """ - assert direction in ['horizontal', 'vertical', 'diagonal'] - if direction == 'horizontal': - return cv2.flip(img, 1, img) - elif direction == 'vertical': - return cv2.flip(img, 0, img) - else: - return cv2.flip(img, -1, img) - - -def imrotate(img, - angle, - center=None, - scale=1.0, - border_value=0, - interpolation='bilinear', - auto_bound=False): - """Rotate an image. - - Args: - img (ndarray): Image to be rotated. - angle (float): Rotation angle in degrees, positive values mean - clockwise rotation. - center (tuple[float], optional): Center point (w, h) of the rotation in - the source image. If not specified, the center of the image will be - used. - scale (float): Isotropic scale factor. - border_value (int): Border value. - interpolation (str): Same as :func:`resize`. - auto_bound (bool): Whether to adjust the image size to cover the whole - rotated image. - - Returns: - ndarray: The rotated image. - """ - if center is not None and auto_bound: - raise ValueError('`auto_bound` conflicts with `center`') - h, w = img.shape[:2] - if center is None: - center = ((w - 1) * 0.5, (h - 1) * 0.5) - assert isinstance(center, tuple) - - matrix = cv2.getRotationMatrix2D(center, -angle, scale) - if auto_bound: - cos = np.abs(matrix[0, 0]) - sin = np.abs(matrix[0, 1]) - new_w = h * sin + w * cos - new_h = h * cos + w * sin - matrix[0, 2] += (new_w - w) * 0.5 - matrix[1, 2] += (new_h - h) * 0.5 - w = int(np.round(new_w)) - h = int(np.round(new_h)) - rotated = cv2.warpAffine( - img, - matrix, (w, h), - flags=cv2_interp_codes[interpolation], - borderValue=border_value) - return rotated - - -def bbox_clip(bboxes, img_shape): - """Clip bboxes to fit the image shape. - - Args: - bboxes (ndarray): Shape (..., 4*k) - img_shape (tuple[int]): (height, width) of the image. - - Returns: - ndarray: Clipped bboxes. 
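- Coordinates are clamped elementwise into [0, width - 1] and - [0, height - 1].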
- """ - assert bboxes.shape[-1] % 4 == 0 - cmin = np.empty(bboxes.shape[-1], dtype=bboxes.dtype) - cmin[0::2] = img_shape[1] - 1 - cmin[1::2] = img_shape[0] - 1 - clipped_bboxes = np.maximum(np.minimum(bboxes, cmin), 0) - return clipped_bboxes - - -def bbox_scaling(bboxes, scale, clip_shape=None): - """Scaling bboxes w.r.t the box center. - - Args: - bboxes (ndarray): Shape(..., 4). - scale (float): Scaling factor. - clip_shape (tuple[int], optional): If specified, bboxes that exceed the - boundary will be clipped according to the given shape (h, w). - - Returns: - ndarray: Scaled bboxes. - """ - if float(scale) == 1.0: - scaled_bboxes = bboxes.copy() - else: - w = bboxes[..., 2] - bboxes[..., 0] + 1 - h = bboxes[..., 3] - bboxes[..., 1] + 1 - dw = (w * (scale - 1)) * 0.5 - dh = (h * (scale - 1)) * 0.5 - scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1) - if clip_shape is not None: - return bbox_clip(scaled_bboxes, clip_shape) - else: - return scaled_bboxes - - -def imcrop(img, bboxes, scale=1.0, pad_fill=None): - """Crop image patches. - - 3 steps: scale the bboxes -> clip bboxes -> crop and pad. - - Args: - img (ndarray): Image to be cropped. - bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. - scale (float, optional): Scale ratio of bboxes, the default value - 1.0 means no padding. - pad_fill (Number | list[Number]): Value to be filled for padding. - Default: None, which means no padding. - - Returns: - list[ndarray] | ndarray: The cropped image patches. - """ - chn = 1 if img.ndim == 2 else img.shape[2] - if pad_fill is not None: - if isinstance(pad_fill, (int, float)): - pad_fill = [pad_fill for _ in range(chn)] - assert len(pad_fill) == chn - - _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes - scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) - clipped_bbox = bbox_clip(scaled_bboxes, img.shape) - - patches = [] - for i in range(clipped_bbox.shape[0]): - x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) - if pad_fill is None: - patch = img[y1:y2 + 1, x1:x2 + 1, ...] - else: - _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) - if chn == 1: - patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) - else: - patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) - patch = np.array( - pad_fill, dtype=img.dtype) * np.ones( - patch_shape, dtype=img.dtype) - x_start = 0 if _x1 >= 0 else -_x1 - y_start = 0 if _y1 >= 0 else -_y1 - w = x2 - x1 + 1 - h = y2 - y1 + 1 - patch[y_start:y_start + h, x_start:x_start + w, - ...] = img[y1:y1 + h, x1:x1 + w, ...] - patches.append(patch) - - if bboxes.ndim == 1: - return patches[0] - else: - return patches - - -def impad(img, - *, - shape=None, - padding=None, - pad_val=0, - padding_mode='constant'): - """Pad the given image to a certain shape or pad on all sides with - specified padding mode and padding value. - - Args: - img (ndarray): Image to be padded. - shape (tuple[int]): Expected padding shape (h, w). Default: None. - padding (int or tuple[int]): Padding on each border. If a single int is - provided this is used to pad all borders. If tuple of length 2 is - provided this is the padding on left/right and top/bottom - respectively. If a tuple of length 4 is provided this is the - padding for the left, top, right and bottom borders respectively. - Default: None. Note that `shape` and `padding` can not be both - set. - pad_val (Number | Sequence[Number]): Values to be filled in padding - areas when padding_mode is 'constant'. Default: 0. - padding_mode (str): Type of padding. 
Should be: constant, edge, - reflect or symmetric. Default: constant. - - - constant: pads with a constant value, this value is specified - with pad_val. - - edge: pads with the last value at the edge of the image. - - reflect: pads with reflection of image without repeating the - last value on the edge. For example, padding [1, 2, 3, 4] - with 2 elements on both sides in reflect mode will result - in [3, 2, 1, 2, 3, 4, 3, 2]. - - symmetric: pads with reflection of image repeating the last - value on the edge. For example, padding [1, 2, 3, 4] with - 2 elements on both sides in symmetric mode will result in - [2, 1, 1, 2, 3, 4, 4, 3] - - Returns: - ndarray: The padded image. - """ - - assert (shape is not None) ^ (padding is not None) - if shape is not None: - padding = (0, 0, shape[1] - img.shape[1], shape[0] - img.shape[0]) - - # check pad_val - if isinstance(pad_val, tuple): - assert len(pad_val) == img.shape[-1] - elif not isinstance(pad_val, numbers.Number): - raise TypeError('pad_val must be an int or a tuple. ' - f'But received {type(pad_val)}') - - # check padding - if isinstance(padding, tuple) and len(padding) in [2, 4]: - if len(padding) == 2: - padding = (padding[0], padding[1], padding[0], padding[1]) - elif isinstance(padding, numbers.Number): - padding = (padding, padding, padding, padding) - else: - raise ValueError('Padding must be an int or a 2- or 4-element tuple. ' - f'But received {padding}') - - # check padding mode - assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] - - border_type = { - 'constant': cv2.BORDER_CONSTANT, - 'edge': cv2.BORDER_REPLICATE, - 'reflect': cv2.BORDER_REFLECT_101, - 'symmetric': cv2.BORDER_REFLECT - } - img = cv2.copyMakeBorder( - img, - padding[1], - padding[3], - padding[0], - padding[2], - border_type[padding_mode], - value=pad_val) - - return img - - -def impad_to_multiple(img, divisor, pad_val=0): - """Pad an image to ensure each edge is a multiple of some number. - - Args: - img (ndarray): Image to be padded. - divisor (int): Padded image edges will be a multiple of divisor. - pad_val (Number | Sequence[Number]): Same as :func:`impad`. - - Returns: - ndarray: The padded image. - """ - pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor - pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor - return impad(img, shape=(pad_h, pad_w), pad_val=pad_val) - - -def cutout(img, shape, pad_val=0):, - """Randomly cut out a rectangle from the original img. - - Args: - img (ndarray): Image to be cutout. - shape (int | tuple[int]): Expected cutout shape (h, w). If given as an - int, the value will be used for both h and w. - pad_val (int | float | tuple[int | float]): Values to be filled in the - cut area. Defaults to 0. - - Returns: - ndarray: The cutout image. - """ - - channels = 1 if img.ndim == 2 else img.shape[2] - if isinstance(shape, int): - cut_h, cut_w = shape, shape - else: - assert isinstance(shape, tuple) and len(shape) == 2, \ - f'shape must be an int or a tuple with length 2, but got type ' \ - f'{type(shape)} instead.' - cut_h, cut_w = shape - if isinstance(pad_val, (int, float)): - pad_val = tuple([pad_val] * channels) - elif isinstance(pad_val, tuple): - assert len(pad_val) == channels, \ - 'Expected the num of elements in tuple equals the channels ' \ - 'of input image. 
Found {} vs {}'.format( - len(pad_val), channels) - else: - raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`') - - img_h, img_w = img.shape[:2] - y0 = np.random.uniform(img_h) - x0 = np.random.uniform(img_w) - - y1 = int(max(0, y0 - cut_h / 2.)) - x1 = int(max(0, x0 - cut_w / 2.)) - y2 = min(img_h, y1 + cut_h) - x2 = min(img_w, x1 + cut_w) - - if img.ndim == 2: - patch_shape = (y2 - y1, x2 - x1) - else: - patch_shape = (y2 - y1, x2 - x1, channels) - - img_cutout = img.copy() - patch = np.array( - pad_val, dtype=img.dtype) * np.ones( - patch_shape, dtype=img.dtype) - img_cutout[y1:y2, x1:x2, ...] = patch - - return img_cutout - - -def _get_shear_matrix(magnitude, direction='horizontal'): - """Generate the shear matrix for transformation. - - Args: - magnitude (int | float): The magnitude used for shear. - direction (str): The shear direction, either "horizontal" - or "vertical". - - Returns: - ndarray: The shear matrix with dtype float32. - """ - if direction == 'horizontal': - shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0]]) - elif direction == 'vertical': - shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0]]) - return shear_matrix - - -def imshear(img, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Shear an image. - - Args: - img (ndarray): Image to be sheared with format (h, w) - or (h, w, c). - magnitude (int | float): The magnitude used for shear. - direction (str): The shear direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. - interpolation (str): Same as :func:`resize`. - - Returns: - ndarray: The sheared image. - """ - assert direction in ['horizontal', - 'vertical'], f'Invalid direction: {direction}' - height, width = img.shape[:2] - if img.ndim == 2: - channels = 1 - elif img.ndim == 3: - channels = img.shape[-1] - if isinstance(border_value, int): - border_value = tuple([border_value] * channels) - elif isinstance(border_value, tuple): - assert len(border_value) == channels, \ - 'Expected the num of elements in tuple equals the channels ' \ - 'of input image. Found {} vs {}'.format( - len(border_value), channels) - else: - raise ValueError( - f'Invalid type {type(border_value)} for `border_value`') - shear_matrix = _get_shear_matrix(magnitude, direction) - sheared = cv2.warpAffine( - img, - shear_matrix, - (width, height), - # Note: when the number of elements in `border_value` is - # greater than 3 (e.g. shearing masks whose channel count - # is larger than 3), `cv2.warpAffine` will raise a TypeError. - # Here simply slice the first 3 values in `border_value`. - borderValue=border_value[:3], - flags=cv2_interp_codes[interpolation]) - return sheared - - -def _get_translate_matrix(offset, direction='horizontal'): - """Generate the translate matrix. - - Args: - offset (int | float): The offset used for translate. - direction (str): The translate direction, either - "horizontal" or "vertical". - - Returns: - ndarray: The translate matrix with dtype float32. - """ - if direction == 'horizontal': - translate_matrix = np.float32([[1, 0, offset], [0, 1, 0]]) - elif direction == 'vertical': - translate_matrix = np.float32([[1, 0, 0], [0, 1, offset]]) - return translate_matrix - - -def imtranslate(img, - offset, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Translate an image. - - Args: - img (ndarray): Image to be translated with format - (h, w) or (h, w, c). - offset (int | float): The offset used for translate.
- direction (str): The translate direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. - interpolation (str): Same as :func:`resize`. - - Returns: - ndarray: The translated image. - """ - assert direction in ['horizontal', - 'vertical'], f'Invalid direction: {direction}' - height, width = img.shape[:2] - if img.ndim == 2: - channels = 1 - elif img.ndim == 3: - channels = img.shape[-1] - if isinstance(border_value, int): - border_value = tuple([border_value] * channels) - elif isinstance(border_value, tuple): - assert len(border_value) == channels, \ - 'Expected the num of elements in tuple equals the channels ' \ - 'of input image. Found {} vs {}'.format( - len(border_value), channels) - else: - raise ValueError( - f'Invalid type {type(border_value)} for `border_value`.') - translate_matrix = _get_translate_matrix(offset, direction) - translated = cv2.warpAffine( - img, - translate_matrix, - (width, height), - # Note: when the number of elements in `border_value` is - # greater than 3 (e.g. translating masks whose channel count - # is larger than 3), `cv2.warpAffine` will raise a TypeError. - # Here simply slice the first 3 values in `border_value`. - borderValue=border_value[:3], - flags=cv2_interp_codes[interpolation]) - return translated diff --git a/spaces/Potanin/12345/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/Potanin/12345/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py deleted file mode 100644 index b2c592527a5966e6f8e79e8c52dc5b414246dcc6..0000000000000000000000000000000000000000 --- a/spaces/Potanin/12345/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +++ /dev/null @@ -1,97 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import parselmouth -import numpy as np - - -class PMF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - Interpolate the F0 contour across unvoiced (zero) frames - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # this may be an unnecessary copy - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def compute_f0(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = 
self.interpolate_f0(f0) - return f0 - - def compute_f0_uv(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0, uv diff --git a/spaces/PushkarA07/Sanskrit-Text-To-Speech/modules.py b/spaces/PushkarA07/Sanskrit-Text-To-Speech/modules.py deleted file mode 100644 index f5af1fd9a20dc03707889f360a39bb4b784a6df3..0000000000000000000000000000000000000000 --- a/spaces/PushkarA07/Sanskrit-Text-To-Speech/modules.py +++ /dev/null @@ -1,387 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 1."
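- # n_layers Conv1d -> LayerNorm blocks share a single ReLU + Dropout; the - # zero-initialized 1x1 projection makes the residual output start out as - # the identity mapping.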
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dilated and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g)
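- # cond_layer packs 2 * hidden_channels conditioning activations for every - # layer; each iteration of the loop below slices out its own chunk via - # cond_offset.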
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
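- # h is read as per-channel rational-quadratic spline parameters: num_bins - # widths, num_bins heights, then num_bins - 1 knot derivatives; the - # division by sqrt(filter_channels) below keeps their initial scale small.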
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/RMXK/RVC_HFF/lib/infer_pack/attentions.py b/spaces/RMXK/RVC_HFF/lib/infer_pack/attentions.py deleted file mode 100644 index 05501be1871643f78dddbeaa529c96667031a8db..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/lib/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from lib.infer_pack import commons -from lib.infer_pack import modules -from lib.infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - 
proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." 
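- # windowed relative-position attention in the style of Shaw et al. (2018): - # learned relative-key embeddings add content-to-position scores, converted - # from relative to absolute indexing below.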
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. 
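- # viewing as (length + 1, 2 * length - 1) shifts each successive row by - # one, aligning relative offsets with absolute positions before slicing.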
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # pad along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Rakot2223/faster-whisper-webui/src/languages.py b/spaces/Rakot2223/faster-whisper-webui/src/languages.py deleted file mode 100644 index fbad66e4d34119d27d12e3dfecbe99b6fdde4db7..0000000000000000000000000000000000000000 --- a/spaces/Rakot2223/faster-whisper-webui/src/languages.py +++ /dev/null @@ -1,147 +0,0 @@ -class Language(): - def __init__(self, code, name): - self.code = code - self.name = name - - def __str__(self): - return "Language(code={}, name={})".format(self.code, self.name) - -LANGUAGES = [ - Language('en', 'English'), - Language('zh', 'Chinese'), - Language('de', 'German'), - Language('es', 'Spanish'), - Language('ru', 'Russian'), - Language('ko', 'Korean'), - Language('fr', 'French'), - Language('ja', 'Japanese'), - Language('pt', 'Portuguese'), - Language('tr', 'Turkish'), - Language('pl', 'Polish'), - Language('ca', 'Catalan'), - Language('nl', 'Dutch'), - Language('ar', 'Arabic'), - Language('sv', 'Swedish'), - Language('it', 'Italian'), - Language('id', 'Indonesian'), -
Language('hi', 'Hindi'), - Language('fi', 'Finnish'), - Language('vi', 'Vietnamese'), - Language('he', 'Hebrew'), - Language('uk', 'Ukrainian'), - Language('el', 'Greek'), - Language('ms', 'Malay'), - Language('cs', 'Czech'), - Language('ro', 'Romanian'), - Language('da', 'Danish'), - Language('hu', 'Hungarian'), - Language('ta', 'Tamil'), - Language('no', 'Norwegian'), - Language('th', 'Thai'), - Language('ur', 'Urdu'), - Language('hr', 'Croatian'), - Language('bg', 'Bulgarian'), - Language('lt', 'Lithuanian'), - Language('la', 'Latin'), - Language('mi', 'Maori'), - Language('ml', 'Malayalam'), - Language('cy', 'Welsh'), - Language('sk', 'Slovak'), - Language('te', 'Telugu'), - Language('fa', 'Persian'), - Language('lv', 'Latvian'), - Language('bn', 'Bengali'), - Language('sr', 'Serbian'), - Language('az', 'Azerbaijani'), - Language('sl', 'Slovenian'), - Language('kn', 'Kannada'), - Language('et', 'Estonian'), - Language('mk', 'Macedonian'), - Language('br', 'Breton'), - Language('eu', 'Basque'), - Language('is', 'Icelandic'), - Language('hy', 'Armenian'), - Language('ne', 'Nepali'), - Language('mn', 'Mongolian'), - Language('bs', 'Bosnian'), - Language('kk', 'Kazakh'), - Language('sq', 'Albanian'), - Language('sw', 'Swahili'), - Language('gl', 'Galician'), - Language('mr', 'Marathi'), - Language('pa', 'Punjabi'), - Language('si', 'Sinhala'), - Language('km', 'Khmer'), - Language('sn', 'Shona'), - Language('yo', 'Yoruba'), - Language('so', 'Somali'), - Language('af', 'Afrikaans'), - Language('oc', 'Occitan'), - Language('ka', 'Georgian'), - Language('be', 'Belarusian'), - Language('tg', 'Tajik'), - Language('sd', 'Sindhi'), - Language('gu', 'Gujarati'), - Language('am', 'Amharic'), - Language('yi', 'Yiddish'), - Language('lo', 'Lao'), - Language('uz', 'Uzbek'), - Language('fo', 'Faroese'), - Language('ht', 'Haitian creole'), - Language('ps', 'Pashto'), - Language('tk', 'Turkmen'), - Language('nn', 'Nynorsk'), - Language('mt', 'Maltese'), - Language('sa', 'Sanskrit'), - Language('lb', 'Luxembourgish'), - Language('my', 'Myanmar'), - Language('bo', 'Tibetan'), - Language('tl', 'Tagalog'), - Language('mg', 'Malagasy'), - Language('as', 'Assamese'), - Language('tt', 'Tatar'), - Language('haw', 'Hawaiian'), - Language('ln', 'Lingala'), - Language('ha', 'Hausa'), - Language('ba', 'Bashkir'), - Language('jw', 'Javanese'), - Language('su', 'Sundanese') -] - -# Lookup by code; alias names resolve to the same Language objects as their -# canonical codes, so lookups consistently return a Language. -_TO_LANGUAGE_CODE = {language.code: language for language in LANGUAGES} - -_LANGUAGE_ALIASES = { - "burmese": "my", - "valencian": "ca", - "flemish": "nl", - "haitian": "ht", - "letzeburgesch": "lb", - "pushto": "ps", - "panjabi": "pa", - "moldavian": "ro", - "moldovan": "ro", - "sinhalese": "si", - "castilian": "es", -} -_TO_LANGUAGE_CODE.update( - {alias: _TO_LANGUAGE_CODE[code] for alias, code in _LANGUAGE_ALIASES.items()} -) - -_FROM_LANGUAGE_NAME = { - **{language.name.lower(): language for language in LANGUAGES} -} - -def get_language_from_code(language_code, default=None) -> Language: - """Return the Language for the given language code (or a known alias).""" - return _TO_LANGUAGE_CODE.get(language_code, default) - -def get_language_from_name(language, default=None) -> Language: - """Return the Language for the given language name.""" - return _FROM_LANGUAGE_NAME.get(language.lower() if language else None, default) - -def get_language_names(): - """Return a list of language names.""" - return [language.name for language in LANGUAGES] - -if __name__ == "__main__": - # Test lookup - print(get_language_from_code('en')) - print(get_language_from_name('English')) - - print(get_language_names()) \ No newline at end of file diff --git
a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/__init__.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/__init__.py deleted file mode 100644 index 5563b5d55c7c5640c5882a0fd81915a055b50fdb..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import List, Optional - -__version__ = "22.3.1" - - -def main(args: Optional[List[str]] = None) -> int: - """This is an internal API only meant for use by pip's own console scripts. - - For additional details, see https://github.com/pypa/pip/issues/7498. - """ - from pip._internal.utils.entrypoints import _wrapper - - return _wrapper(args) diff --git a/spaces/Raspberry-ai/main/ai_converter.py b/spaces/Raspberry-ai/main/ai_converter.py deleted file mode 100644 index 996266166c094c93c98360b1a5bb6a25d7cc1e64..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/ai_converter.py +++ /dev/null @@ -1,78 +0,0 @@ -""" - JS function to convert an image from the gallery to an .ai file. - - The conversion process entails the following steps: - 1. Extract the primary image from the gallery - 2. Send the image for conversion to convertio - 3. Poll until the conversion completes - 4. Download the .ai file - - Note: Using the convertio api works, but the .ai results are not great (e.g. parts of the dress are left out entirely). - The https://onlineconvertfree.com/file-conversion-api/ API works much better, but the API usage fails due to CORS. Waiting on a response from them to use their API :/ - -""" -convert_gallery_image_to_ai_js=""" - () => { - function downloadBase64File(contentType, base64Data, fileName) { - contentType="application/postscript"; - const linkSource = `data:${contentType};base64,${base64Data}`; - const downloadLink = document.createElement("a"); - downloadLink.href = linkSource; - downloadLink.download = fileName; - downloadLink.click(); - } - const convertToAiAndDownload = async (imgSrc) => { - console.log("starting conversion fr"); - var payload = { - 'apikey': 'ae1cff8f05ac2c190c345bb02fa150b2', - 'outputformat': 'ai', - 'input': "url", - "file": imgSrc, - }; - // console.log('set payload: ', payload); - // console.log('Sending request to start file conversion.'); - const response = await fetch('https://api.convertio.co/convert', { - method: 'POST', - body: JSON.stringify(payload), // string or object - headers: { - 'Content-Type': 'application/json', - 'accept': 'application/json', - } - }); - const responseJson = await response.json(); - console.log("responseJson:", responseJson); - const fileId = responseJson.data.id; - console.log("file id:", fileId); - fetchAndDownloadFile(fileId, 7); - } - // Hacky way of retrying if the converted file isn't ready. Probably a better way to do this. - const fetchAndDownloadFile = async (fileId, numAttempts) => { - const url = `https://api.convertio.co/convert/${fileId}/dl`; - if (!numAttempts) { - console.log("Failed all attempts at getting converted file!"); - return "Failed"; - } - try { - const response = await fetch(url) - console.log('called fetch'); - const responseJson = await response.json(); - console.log('responsejson: ', responseJson); - downloadBase64File('application/postscript', responseJson.data.content, 'raspberry.ai'); - } catch(err) { - console.log("Failed to fetch converted file! Attempt #", numAttempts); - numAttempts -= 1; - // Set a 1s delay between retries. Typically takes 3 secs to complete conversion.
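- // A loop-based equivalent of this retry (sketch only; sleep and tryDownload are hypothetical helpers, not part of this file): - // const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms)); - // for (let i = 0; i < 7; i++) { try { return await tryDownload(fileId); } catch (e) { await sleep(1000); } }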
- setTimeout(function(){ - return fetchAndDownloadFile(fileId,numAttempts); - }, 1000); - return "Attempted again"; - } - return "Done"; - } - const gallery_element = document.getElementById("gallery"); - console.log("gallery_element:", gallery_element); - const primary_image_src = gallery_element.querySelector('img').src; - console.log("primary image:", primary_image_src); - convertToAiAndDownload(primary_image_src); - - }""" \ No newline at end of file diff --git a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/utils/common.py b/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/utils/common.py deleted file mode 100644 index aa2007b0b31df0325c51f4112a259ab1e1d7f1aa..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/datasets/utils/common.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python -""" -Copyright 2017, Zixin Luo, HKUST. -Commonly used functions -""" - -from __future__ import print_function - -import os -from datetime import datetime - - -class ClassProperty(property): - """For dynamically obtaining system time""" - - def __get__(self, cls, owner): - return classmethod(self.fget).__get__(None, owner)() - - -class Notify(object): - """Colorful printing prefix. - A quick example: - print(Notify.INFO, YOUR TEXT, Notify.ENDC) - """ - - def __init__(self): - pass - - @ClassProperty - def HEADER(cls): - return str(datetime.now()) + ": \033[95m" - - @ClassProperty - def INFO(cls): - return str(datetime.now()) + ": \033[92mI" - - @ClassProperty - def OKBLUE(cls): - return str(datetime.now()) + ": \033[94m" - - @ClassProperty - def WARNING(cls): - return str(datetime.now()) + ": \033[93mW" - - @ClassProperty - def FAIL(cls): - return str(datetime.now()) + ": \033[91mF" - - @ClassProperty - def BOLD(cls): - return str(datetime.now()) + ": \033[1mB" - - @ClassProperty - def UNDERLINE(cls): - return str(datetime.now()) + ": \033[4mU" - - ENDC = "\033[0m" diff --git a/spaces/Realcat/image-matching-webui/third_party/SOLD2/sold2/dataset/holicity_dataset.py b/spaces/Realcat/image-matching-webui/third_party/SOLD2/sold2/dataset/holicity_dataset.py deleted file mode 100644 index af182c5ef46d68d595da4c3dda76c1f631d56fcc..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/SOLD2/sold2/dataset/holicity_dataset.py +++ /dev/null @@ -1,852 +0,0 @@ -""" -File to process and load the Holicity dataset. 
-""" -import os -import math -import copy -import PIL -import numpy as np -import h5py -import cv2 -import pickle -from skimage.io import imread -from skimage import color -import torch -import torch.utils.data.dataloader as torch_loader -from torch.utils.data import Dataset -from torchvision import transforms - -from ..config.project_config import Config as cfg -from .transforms import photometric_transforms as photoaug -from .transforms import homographic_transforms as homoaug -from .transforms.utils import random_scaling -from .synthetic_util import get_line_heatmap -from ..misc.geometry_utils import warp_points, mask_points -from ..misc.train_utils import parse_h5_data - - -def holicity_collate_fn(batch): - """Customized collate_fn.""" - batch_keys = [ - "image", - "junction_map", - "valid_mask", - "heatmap", - "heatmap_pos", - "heatmap_neg", - "homography", - "line_points", - "line_indices", - ] - list_keys = ["junctions", "line_map", "line_map_pos", "line_map_neg", "file_key"] - - outputs = {} - for data_key in batch[0].keys(): - batch_match = sum([_ in data_key for _ in batch_keys]) - list_match = sum([_ in data_key for _ in list_keys]) - # print(batch_match, list_match) - if batch_match > 0 and list_match == 0: - outputs[data_key] = torch_loader.default_collate( - [b[data_key] for b in batch] - ) - elif batch_match == 0 and list_match > 0: - outputs[data_key] = [b[data_key] for b in batch] - elif batch_match == 0 and list_match == 0: - continue - else: - raise ValueError( - "[Error] A key matches batch keys and list keys simultaneously." - ) - - return outputs - - -class HolicityDataset(Dataset): - def __init__(self, mode="train", config=None): - super(HolicityDataset, self).__init__() - if not mode in ["train", "test"]: - raise ValueError( - "[Error] Unknown mode for Holicity dataset. Only 'train' and 'test'." - ) - self.mode = mode - - if config is None: - self.config = self.get_default_config() - else: - self.config = config - # Also get the default config - self.default_config = self.get_default_config() - - # Get cache setting - self.dataset_name = self.get_dataset_name() - self.cache_name = self.get_cache_name() - self.cache_path = cfg.holicity_cache_path - - # Get the ground truth source if it exists - self.gt_source = None - if "gt_source_%s" % (self.mode) in self.config: - self.gt_source = self.config.get("gt_source_%s" % (self.mode)) - self.gt_source = os.path.join(cfg.export_dataroot, self.gt_source) - # Check the full path exists - if not os.path.exists(self.gt_source): - raise ValueError( - "[Error] The specified ground truth source does not exist." 
- ) - - # Get the filename dataset - print("[Info] Initializing Holicity dataset...") - self.filename_dataset, self.datapoints = self.construct_dataset() - - # Get dataset length - self.dataset_length = len(self.datapoints) - - # Print some info - print("[Info] Successfully initialized dataset") - print("\t Name: Holicity") - print("\t Mode: %s" % (self.mode)) - print("\t Gt: %s" % (self.config.get("gt_source_%s" % (self.mode), "None"))) - print("\t Counts: %d" % (self.dataset_length)) - print("----------------------------------------") - - ####################################### - ## Dataset construction related APIs ## - ####################################### - def construct_dataset(self): - """Construct the dataset (from scratch or from cache).""" - # Check if the filename cache exists - # If cache exists, load from cache - if self.check_dataset_cache(): - print( - "\t Found filename cache %s at %s" % (self.cache_name, self.cache_path) - ) - print("\t Load filename cache...") - filename_dataset, datapoints = self.get_filename_dataset_from_cache() - # If not, initialize dataset from scratch - else: - print("\t Can't find filename cache ...") - print("\t Create filename dataset from scratch...") - filename_dataset, datapoints = self.get_filename_dataset() - print("\t Create filename dataset cache...") - self.create_filename_dataset_cache(filename_dataset, datapoints) - - return filename_dataset, datapoints - - def create_filename_dataset_cache(self, filename_dataset, datapoints): - """Create filename dataset cache for faster initialization.""" - # Check cache path exists - if not os.path.exists(self.cache_path): - os.makedirs(self.cache_path) - - cache_file_path = os.path.join(self.cache_path, self.cache_name) - data = {"filename_dataset": filename_dataset, "datapoints": datapoints} - with open(cache_file_path, "wb") as f: - pickle.dump(data, f, pickle.HIGHEST_PROTOCOL) - - def get_filename_dataset_from_cache(self): - """Get filename dataset from cache.""" - # Load from pkl cache - cache_file_path = os.path.join(self.cache_path, self.cache_name) - with open(cache_file_path, "rb") as f: - data = pickle.load(f) - - return data["filename_dataset"], data["datapoints"] - - def get_filename_dataset(self): - """Get the path to the dataset.""" - if self.mode == "train": - # Contains 5720 or 11872 images - dataset_path = [ - os.path.join(cfg.holicity_dataroot, p) - for p in self.config["train_splits"] - ] - else: - # Test mode - Contains 520 images - dataset_path = [os.path.join(cfg.holicity_dataroot, "2018-03")] - - # Get paths to all image files - image_paths = [] - for folder in dataset_path: - image_paths += [ - os.path.join(folder, img) - for img in os.listdir(folder) - if os.path.splitext(img)[-1] == ".jpg" - ] - image_paths = sorted(image_paths) - - # Verify all the images exist - for idx in range(len(image_paths)): - image_path = image_paths[idx] - if not (os.path.exists(image_path)): - raise ValueError("[Error] The image does not exist. 
%s" % (image_path)) - - # Construct the filename dataset - num_pad = int(math.ceil(math.log10(len(image_paths))) + 1) - filename_dataset = {} - for idx in range(len(image_paths)): - # Get the file key - key = self.get_padded_filename(num_pad, idx) - - filename_dataset[key] = {"image": image_paths[idx]} - - # Get the datapoints - datapoints = list(sorted(filename_dataset.keys())) - - return filename_dataset, datapoints - - def get_dataset_name(self): - """Get dataset name from dataset config / default config.""" - dataset_name = self.config.get( - "dataset_name", self.default_config["dataset_name"] - ) - dataset_name = dataset_name + "_%s" % self.mode - return dataset_name - - def get_cache_name(self): - """Get cache name from dataset config / default config.""" - dataset_name = self.config.get( - "dataset_name", self.default_config["dataset_name"] - ) - dataset_name = dataset_name + "_%s" % self.mode - # Compose cache name - cache_name = dataset_name + "_cache.pkl" - return cache_name - - def check_dataset_cache(self): - """Check if dataset cache exists.""" - cache_file_path = os.path.join(self.cache_path, self.cache_name) - if os.path.exists(cache_file_path): - return True - else: - return False - - @staticmethod - def get_padded_filename(num_pad, idx): - """Get the padded filename using adaptive padding.""" - file_len = len("%d" % (idx)) - filename = "0" * (num_pad - file_len) + "%d" % (idx) - return filename - - def get_default_config(self): - """Get the default configuration.""" - return { - "dataset_name": "holicity", - "train_split": "2018-01", - "add_augmentation_to_all_splits": False, - "preprocessing": {"resize": [512, 512], "blur_size": 11}, - "augmentation": { - "photometric": {"enable": False}, - "homographic": {"enable": False}, - }, - } - - ############################################ - ## Pytorch and preprocessing related APIs ## - ############################################ - @staticmethod - def get_data_from_path(data_path): - """Get data from the information from filename dataset.""" - output = {} - - # Get image data - image_path = data_path["image"] - image = imread(image_path) - output["image"] = image - - return output - - @staticmethod - def convert_line_map(lcnn_line_map, num_junctions): - """Convert the line_pos or line_neg - (represented by two junction indexes) to our line map.""" - # Initialize empty line map - line_map = np.zeros([num_junctions, num_junctions]) - - # Iterate through all the lines - for idx in range(lcnn_line_map.shape[0]): - index1 = lcnn_line_map[idx, 0] - index2 = lcnn_line_map[idx, 1] - - line_map[index1, index2] = 1 - line_map[index2, index1] = 1 - - return line_map - - @staticmethod - def junc_to_junc_map(junctions, image_size): - """Convert junction points to junction maps.""" - junctions = np.round(junctions).astype(np.int) - # Clip the boundary by image size - junctions[:, 0] = np.clip(junctions[:, 0], 0.0, image_size[0] - 1) - junctions[:, 1] = np.clip(junctions[:, 1], 0.0, image_size[1] - 1) - - # Create junction map - junc_map = np.zeros([image_size[0], image_size[1]]) - junc_map[junctions[:, 0], junctions[:, 1]] = 1 - - return junc_map[..., None].astype(np.int) - - def parse_transforms(self, names, all_transforms): - """Parse the transform.""" - trans = ( - all_transforms - if (names == "all") - else (names if isinstance(names, list) else [names]) - ) - assert set(trans) <= set(all_transforms) - return trans - - def get_photo_transform(self): - """Get list of photometric transforms (according to the config).""" - # Get the 
photometric transform config - photo_config = self.config["augmentation"]["photometric"] - if not photo_config["enable"]: - raise ValueError("[Error] Photometric augmentation is not enabled.") - - # Parse photometric transforms - trans_lst = self.parse_transforms( - photo_config["primitives"], photoaug.available_augmentations - ) - trans_config_lst = [photo_config["params"].get(p, {}) for p in trans_lst] - - # List of photometric augmentation - photometric_trans_lst = [ - getattr(photoaug, trans)(**conf) - for (trans, conf) in zip(trans_lst, trans_config_lst) - ] - - return photometric_trans_lst - - def get_homo_transform(self): - """Get homographic transforms (according to the config).""" - # Get homographic transforms for image - homo_config = self.config["augmentation"]["homographic"]["params"] - if not self.config["augmentation"]["homographic"]["enable"]: - raise ValueError("[Error] Homographic augmentation is not enabled") - - # Parse the homographic transforms - image_shape = self.config["preprocessing"]["resize"] - - # Compute the min_label_len from config - try: - min_label_tmp = self.config["generation"]["min_label_len"] - except: - min_label_tmp = None - - # float label len => fraction - if isinstance(min_label_tmp, float): # Skip if not provided - min_label_len = min_label_tmp * min(image_shape) - # int label len => length in pixel - elif isinstance(min_label_tmp, int): - scale_ratio = ( - self.config["preprocessing"]["resize"] - / self.config["generation"]["image_size"][0] - ) - min_label_len = self.config["generation"]["min_label_len"] * scale_ratio - # if none => no restriction - else: - min_label_len = 0 - - # Initialize the transform - homographic_trans = homoaug.homography_transform( - image_shape, homo_config, 0, min_label_len - ) - - return homographic_trans - - def get_line_points( - self, junctions, line_map, H1=None, H2=None, img_size=None, warp=False - ): - """Sample evenly points along each line segments - and keep track of line idx.""" - if np.sum(line_map) == 0: - # No segment detected in the image - line_indices = np.zeros(self.config["max_pts"], dtype=int) - line_points = np.zeros((self.config["max_pts"], 2), dtype=float) - return line_points, line_indices - - # Extract all pairs of connected junctions - junc_indices = np.array( - [[i, j] for (i, j) in zip(*np.where(line_map)) if j > i] - ) - line_segments = np.stack( - [junctions[junc_indices[:, 0]], junctions[junc_indices[:, 1]]], axis=1 - ) - # line_segments is (num_lines, 2, 2) - line_lengths = np.linalg.norm(line_segments[:, 0] - line_segments[:, 1], axis=1) - - # Sample the points separated by at least min_dist_pts along each line - # The number of samples depends on the length of the line - num_samples = np.minimum( - line_lengths // self.config["min_dist_pts"], self.config["max_num_samples"] - ) - line_points = [] - line_indices = [] - cur_line_idx = 1 - for n in np.arange(2, self.config["max_num_samples"] + 1): - # Consider all lines where we can fit up to n points - cur_line_seg = line_segments[num_samples == n] - line_points_x = np.linspace( - cur_line_seg[:, 0, 0], cur_line_seg[:, 1, 0], n, axis=-1 - ).flatten() - line_points_y = np.linspace( - cur_line_seg[:, 0, 1], cur_line_seg[:, 1, 1], n, axis=-1 - ).flatten() - jitter = self.config.get("jittering", 0) - if jitter: - # Add a small random jittering of all points along the line - angles = np.arctan2( - cur_line_seg[:, 1, 0] - cur_line_seg[:, 0, 0], - cur_line_seg[:, 1, 1] - cur_line_seg[:, 0, 1], - ).repeat(n) - jitter_hyp = 
(np.random.rand(len(angles)) * 2 - 1) * jitter - line_points_x += jitter_hyp * np.sin(angles) - line_points_y += jitter_hyp * np.cos(angles) - line_points.append(np.stack([line_points_x, line_points_y], axis=-1)) - # Keep track of the line indices for each sampled point - num_cur_lines = len(cur_line_seg) - line_idx = np.arange(cur_line_idx, cur_line_idx + num_cur_lines) - line_indices.append(line_idx.repeat(n)) - cur_line_idx += num_cur_lines - line_points = np.concatenate(line_points, axis=0)[: self.config["max_pts"]] - line_indices = np.concatenate(line_indices, axis=0)[: self.config["max_pts"]] - - # Warp the points if need be, and filter unvalid ones - # If the other view is also warped - if warp and H2 is not None: - warp_points2 = warp_points(line_points, H2) - line_points = warp_points(line_points, H1) - mask = mask_points(line_points, img_size) - mask2 = mask_points(warp_points2, img_size) - mask = mask * mask2 - # If the other view is not warped - elif warp and H2 is None: - line_points = warp_points(line_points, H1) - mask = mask_points(line_points, img_size) - else: - if H1 is not None: - raise ValueError("[Error] Wrong combination of homographies.") - # Remove points that would be outside of img_size if warped by H - warped_points = warp_points(line_points, H1) - mask = mask_points(warped_points, img_size) - line_points = line_points[mask] - line_indices = line_indices[mask] - - # Pad the line points to a fixed length - # Index of 0 means padded line - line_indices = np.concatenate( - [line_indices, np.zeros(self.config["max_pts"] - len(line_indices))], axis=0 - ) - line_points = np.concatenate( - [ - line_points, - np.zeros((self.config["max_pts"] - len(line_points), 2), dtype=float), - ], - axis=0, - ) - - return line_points, line_indices - - def export_preprocessing(self, data, numpy=False): - """Preprocess the exported data.""" - # Fetch the corresponding entries - image = data["image"] - image_size = image.shape[:2] - - # Resize the image before photometric and homographical augmentations - if not (list(image_size) == self.config["preprocessing"]["resize"]): - # Resize the image and the point location. 
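- # Note: the resize config is stored as (H, W), while cv2.resize below expects (width, height), hence the [::-1] reversal.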
- size_old = list(image.shape)[:2] # Only H and W dimensions - - image = cv2.resize( - image, - tuple(self.config["preprocessing"]["resize"][::-1]), - interpolation=cv2.INTER_LINEAR, - ) - image = np.array(image, dtype=np.uint8) - - # Optionally convert the image to grayscale - if self.config["gray_scale"]: - image = (color.rgb2gray(image) * 255.0).astype(np.uint8) - - image = photoaug.normalize_image()(image) - - # Convert to tensor and return the results - to_tensor = transforms.ToTensor() - if not numpy: - return {"image": to_tensor(image)} - else: - return {"image": image} - - def train_preprocessing_exported( - self, - data, - numpy=False, - disable_homoaug=False, - desc_training=False, - H1=None, - H1_scale=None, - H2=None, - scale=1.0, - h_crop=None, - w_crop=None, - ): - """Train preprocessing for the exported labels.""" - data = copy.deepcopy(data) - # Fetch the corresponding entries - image = data["image"] - junctions = data["junctions"] - line_map = data["line_map"] - image_size = image.shape[:2] - - # Define the random crop for scaling if necessary - if h_crop is None or w_crop is None: - h_crop, w_crop = 0, 0 - if scale > 1: - H, W = self.config["preprocessing"]["resize"] - H_scale, W_scale = round(H * scale), round(W * scale) - if H_scale > H: - h_crop = np.random.randint(H_scale - H) - if W_scale > W: - w_crop = np.random.randint(W_scale - W) - - # Resize the image before photometric and homographical augmentations - if not (list(image_size) == self.config["preprocessing"]["resize"]): - # Resize the image and the point location. - size_old = list(image.shape)[:2] # Only H and W dimensions - - image = cv2.resize( - image, - tuple(self.config["preprocessing"]["resize"][::-1]), - interpolation=cv2.INTER_LINEAR, - ) - image = np.array(image, dtype=np.uint8) - - # # In HW format - # junctions = (junctions * np.array( - # self.config['preprocessing']['resize'], np.float) - # / np.array(size_old, np.float)) - - # Generate the line heatmap after post-processing - junctions_xy = np.flip(np.round(junctions).astype(np.int32), axis=1) - image_size = image.shape[:2] - heatmap = get_line_heatmap(junctions_xy, line_map, image_size) - - # Optionally convert the image to grayscale - if self.config["gray_scale"]: - image = (color.rgb2gray(image) * 255.0).astype(np.uint8) - - # Check if we need to apply augmentations - # In training mode => yes. 
- # In homography adaptation mode (export mode) => No - if self.config["augmentation"]["photometric"]["enable"]: - photo_trans_lst = self.get_photo_transform() - ### Image transform ### - np.random.shuffle(photo_trans_lst) - image_transform = transforms.Compose( - photo_trans_lst + [photoaug.normalize_image()] - ) - else: - image_transform = photoaug.normalize_image() - image = image_transform(image) - - # Perform the random scaling - if scale != 1.0: - image, junctions, line_map, valid_mask = random_scaling( - image, junctions, line_map, scale, h_crop=h_crop, w_crop=w_crop - ) - else: - # Declare default valid mask (all ones) - valid_mask = np.ones(image_size) - - # Initialize the empty output dict - outputs = {} - # Convert to tensor and return the results - to_tensor = transforms.ToTensor() - - # Check homographic augmentation - warp = ( - self.config["augmentation"]["homographic"]["enable"] - and disable_homoaug == False - ) - if warp: - homo_trans = self.get_homo_transform() - # Perform homographic transform - if H1 is None: - homo_outputs = homo_trans( - image, junctions, line_map, valid_mask=valid_mask - ) - else: - homo_outputs = homo_trans( - image, - junctions, - line_map, - homo=H1, - scale=H1_scale, - valid_mask=valid_mask, - ) - homography_mat = homo_outputs["homo"] - - # Give the warp of the other view - if H1 is None: - H1 = homo_outputs["homo"] - - # Sample points along each line segments for the descriptor - if desc_training: - line_points, line_indices = self.get_line_points( - junctions, line_map, H1=H1, H2=H2, img_size=image_size, warp=warp - ) - - # Record the warped results - if warp: - junctions = homo_outputs["junctions"] # Should be HW format - image = homo_outputs["warped_image"] - line_map = homo_outputs["line_map"] - valid_mask = homo_outputs["valid_mask"] # Same for pos and neg - heatmap = homo_outputs["warped_heatmap"] - - # Optionally put warping information first. - if not numpy: - outputs["homography_mat"] = to_tensor(homography_mat).to(torch.float32)[ - 0, ... 
- ] - else: - outputs["homography_mat"] = homography_mat.astype(np.float32) - - junction_map = self.junc_to_junc_map(junctions, image_size) - - if not numpy: - outputs.update( - { - "image": to_tensor(image), - "junctions": to_tensor(junctions).to(torch.float32)[0, ...], - "junction_map": to_tensor(junction_map).to(torch.int), - "line_map": to_tensor(line_map).to(torch.int32)[0, ...], - "heatmap": to_tensor(heatmap).to(torch.int32), - "valid_mask": to_tensor(valid_mask).to(torch.int32), - } - ) - if desc_training: - outputs.update( - { - "line_points": to_tensor(line_points).to(torch.float32)[0], - "line_indices": torch.tensor(line_indices, dtype=torch.int), - } - ) - else: - outputs.update( - { - "image": image, - "junctions": junctions.astype(np.float32), - "junction_map": junction_map.astype(np.int32), - "line_map": line_map.astype(np.int32), - "heatmap": heatmap.astype(np.int32), - "valid_mask": valid_mask.astype(np.int32), - } - ) - if desc_training: - outputs.update( - { - "line_points": line_points.astype(np.float32), - "line_indices": line_indices.astype(int), - } - ) - - return outputs - - def preprocessing_exported_paired_desc(self, data, numpy=False, scale=1.0): - """Train preprocessing for paired data for the exported labels - for descriptor training.""" - outputs = {} - - # Define the random crop for scaling if necessary - h_crop, w_crop = 0, 0 - if scale > 1: - H, W = self.config["preprocessing"]["resize"] - H_scale, W_scale = round(H * scale), round(W * scale) - if H_scale > H: - h_crop = np.random.randint(H_scale - H) - if W_scale > W: - w_crop = np.random.randint(W_scale - W) - - # Sample ref homography first - homo_config = self.config["augmentation"]["homographic"]["params"] - image_shape = self.config["preprocessing"]["resize"] - ref_H, ref_scale = homoaug.sample_homography(image_shape, **homo_config) - - # Data for target view (All augmentation) - target_data = self.train_preprocessing_exported( - data, - numpy=numpy, - desc_training=True, - H1=None, - H2=ref_H, - scale=scale, - h_crop=h_crop, - w_crop=w_crop, - ) - - # Data for reference view (No homographical augmentation) - ref_data = self.train_preprocessing_exported( - data, - numpy=numpy, - desc_training=True, - H1=ref_H, - H1_scale=ref_scale, - H2=target_data["homography_mat"].numpy(), - scale=scale, - h_crop=h_crop, - w_crop=w_crop, - ) - - # Spread ref data - for key, val in ref_data.items(): - outputs["ref_" + key] = val - - # Spread target data - for key, val in target_data.items(): - outputs["target_" + key] = val - - return outputs - - def test_preprocessing_exported(self, data, numpy=False): - """Test preprocessing for the exported labels.""" - data = copy.deepcopy(data) - # Fetch the corresponding entries - image = data["image"] - junctions = data["junctions"] - line_map = data["line_map"] - image_size = image.shape[:2] - - # Resize the image before photometric and homographical augmentations - if not (list(image_size) == self.config["preprocessing"]["resize"]): - # Resize the image and the point location. 
- size_old = list(image.shape)[:2] # Only H and W dimensions - - image = cv2.resize( - image, - tuple(self.config["preprocessing"]["resize"][::-1]), - interpolation=cv2.INTER_LINEAR, - ) - image = np.array(image, dtype=np.uint8) - - # # In HW format - # junctions = (junctions * np.array( - # self.config['preprocessing']['resize'], np.float) - # / np.array(size_old, np.float)) - - # Optionally convert the image to grayscale - if self.config["gray_scale"]: - image = (color.rgb2gray(image) * 255.0).astype(np.uint8) - - # Still need to normalize image - image_transform = photoaug.normalize_image() - image = image_transform(image) - - # Generate the line heatmap after post-processing - junctions_xy = np.flip(np.round(junctions).astype(np.int32), axis=1) - image_size = image.shape[:2] - heatmap = get_line_heatmap(junctions_xy, line_map, image_size) - - # Declare default valid mask (all ones) - valid_mask = np.ones(image_size) - - junction_map = self.junc_to_junc_map(junctions, image_size) - - # Convert to tensor and return the results - to_tensor = transforms.ToTensor() - if not numpy: - outputs = { - "image": to_tensor(image), - "junctions": to_tensor(junctions).to(torch.float32)[0, ...], - "junction_map": to_tensor(junction_map).to(torch.int), - "line_map": to_tensor(line_map).to(torch.int32)[0, ...], - "heatmap": to_tensor(heatmap).to(torch.int32), - "valid_mask": to_tensor(valid_mask).to(torch.int32), - } - else: - outputs = { - "image": image, - "junctions": junctions.astype(np.float32), - "junction_map": junction_map.astype(np.int32), - "line_map": line_map.astype(np.int32), - "heatmap": heatmap.astype(np.int32), - "valid_mask": valid_mask.astype(np.int32), - } - - return outputs - - def __len__(self): - return self.dataset_length - - def get_data_from_key(self, file_key): - """Get data from file_key.""" - # Check key exists - if not file_key in self.filename_dataset.keys(): - raise ValueError("[Error] the specified key is not in the dataset.") - - # Get the data paths - data_path = self.filename_dataset[file_key] - # Read in the image and npz labels - data = self.get_data_from_path(data_path) - - # Perform transform and augmentation - if self.mode == "train" or self.config["add_augmentation_to_all_splits"]: - data = self.train_preprocessing(data, numpy=True) - else: - data = self.test_preprocessing(data, numpy=True) - - # Add file key to the output - data["file_key"] = file_key - - return data - - def __getitem__(self, idx): - """Return data - file_key: str, keys used to retrieve data from the filename dataset. 
- image: torch.float, C*H*W range 0~1, - junctions: torch.float, N*2, - junction_map: torch.int32, 1*H*W range 0 or 1, - line_map: torch.int32, N*N range 0 or 1, - heatmap: torch.int32, 1*H*W range 0 or 1, - valid_mask: torch.int32, 1*H*W range 0 or 1 - """ - # Get the corresponding datapoint and contents from filename dataset - file_key = self.datapoints[idx] - data_path = self.filename_dataset[file_key] - # Read in the image and npz labels - data = self.get_data_from_path(data_path) - - if self.gt_source: - with h5py.File(self.gt_source, "r") as f: - exported_label = parse_h5_data(f[file_key]) - - data["junctions"] = exported_label["junctions"] - data["line_map"] = exported_label["line_map"] - - # Perform transform and augmentation - return_type = self.config.get("return_type", "single") - if self.gt_source is None: - # For export only - data = self.export_preprocessing(data) - elif self.mode == "train" or self.config["add_augmentation_to_all_splits"]: - # Perform random scaling first - if self.config["augmentation"]["random_scaling"]["enable"]: - scale_range = self.config["augmentation"]["random_scaling"]["range"] - # Decide the scaling - scale = np.random.uniform(min(scale_range), max(scale_range)) - else: - scale = 1.0 - if self.mode == "train" and return_type == "paired_desc": - data = self.preprocessing_exported_paired_desc(data, scale=scale) - else: - data = self.train_preprocessing_exported(data, scale=scale) - else: - if return_type == "paired_desc": - data = self.preprocessing_exported_paired_desc(data) - else: - data = self.test_preprocessing_exported(data) - - # Add file key to the output - data["file_key"] = file_key - - return data diff --git a/spaces/Reself/StableVideo/annotator/midas/utils.py b/spaces/Reself/StableVideo/annotator/midas/utils.py deleted file mode 100644 index 9a9d3b5b66370fa98da9e067ba53ead848ea9a59..0000000000000000000000000000000000000000 --- a/spaces/Reself/StableVideo/annotator/midas/utils.py +++ /dev/null @@ -1,189 +0,0 @@ -"""Utils for monoDepth.""" -import sys -import re -import numpy as np -import cv2 -import torch - - -def read_pfm(path): - """Read pfm file. - - Args: - path (str): path to file - - Returns: - tuple: (data, scale) - """ - with open(path, "rb") as file: - - color = None - width = None - height = None - scale = None - endian = None - - header = file.readline().rstrip() - if header.decode("ascii") == "PF": - color = True - elif header.decode("ascii") == "Pf": - color = False - else: - raise Exception("Not a PFM file: " + path) - - dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) - if dim_match: - width, height = list(map(int, dim_match.groups())) - else: - raise Exception("Malformed PFM header.") - - scale = float(file.readline().decode("ascii").rstrip()) - if scale < 0: - # little-endian - endian = "<" - scale = -scale - else: - # big-endian - endian = ">" - - data = np.fromfile(file, endian + "f") - shape = (height, width, 3) if color else (height, width) - - data = np.reshape(data, shape) - data = np.flipud(data) - - return data, scale - - -def write_pfm(path, image, scale=1): - """Write pfm file. - - Args: - path (str): pathto file - image (array): data - scale (int, optional): Scale. Defaults to 1. 
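- - Example (illustrative round trip; assumes a float32 numpy array): - >>> img = np.random.rand(4, 4).astype(np.float32) - >>> write_pfm("depth.pfm", img) - >>> data, scale = read_pfm("depth.pfm")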
- """ - - with open(path, "wb") as file: - color = None - - if image.dtype.name != "float32": - raise Exception("Image dtype must be float32.") - - image = np.flipud(image) - - if len(image.shape) == 3 and image.shape[2] == 3: # color image - color = True - elif ( - len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1 - ): # greyscale - color = False - else: - raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.") - - file.write("PF\n" if color else "Pf\n".encode()) - file.write("%d %d\n".encode() % (image.shape[1], image.shape[0])) - - endian = image.dtype.byteorder - - if endian == "<" or endian == "=" and sys.byteorder == "little": - scale = -scale - - file.write("%f\n".encode() % scale) - - image.tofile(file) - - -def read_image(path): - """Read image and output RGB image (0-1). - - Args: - path (str): path to file - - Returns: - array: RGB image (0-1) - """ - img = cv2.imread(path) - - if img.ndim == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0 - - return img - - -def resize_image(img): - """Resize image and make it fit for network. - - Args: - img (array): image - - Returns: - tensor: data ready for network - """ - height_orig = img.shape[0] - width_orig = img.shape[1] - - if width_orig > height_orig: - scale = width_orig / 384 - else: - scale = height_orig / 384 - - height = (np.ceil(height_orig / scale / 32) * 32).astype(int) - width = (np.ceil(width_orig / scale / 32) * 32).astype(int) - - img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA) - - img_resized = ( - torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float() - ) - img_resized = img_resized.unsqueeze(0) - - return img_resized - - -def resize_depth(depth, width, height): - """Resize depth map and bring to CPU (numpy). - - Args: - depth (tensor): depth - width (int): image width - height (int): image height - - Returns: - array: processed depth - """ - depth = torch.squeeze(depth[0, :, :, :]).to("cpu") - - depth_resized = cv2.resize( - depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC - ) - - return depth_resized - -def write_depth(path, depth, bits=1): - """Write depth map to pfm and png file. - - Args: - path (str): filepath without extension - depth (array): depth - """ - write_pfm(path + ".pfm", depth.astype(np.float32)) - - depth_min = depth.min() - depth_max = depth.max() - - max_val = (2**(8*bits))-1 - - if depth_max - depth_min > np.finfo("float").eps: - out = max_val * (depth - depth_min) / (depth_max - depth_min) - else: - out = np.zeros(depth.shape, dtype=depth.type) - - if bits == 1: - cv2.imwrite(path + ".png", out.astype("uint8")) - elif bits == 2: - cv2.imwrite(path + ".png", out.astype("uint16")) - - return diff --git a/spaces/Reself/StableVideo/ldm/modules/diffusionmodules/model.py b/spaces/Reself/StableVideo/ldm/modules/diffusionmodules/model.py deleted file mode 100644 index b089eebbe1676d8249005bb9def002ff5180715b..0000000000000000000000000000000000000000 --- a/spaces/Reself/StableVideo/ldm/modules/diffusionmodules/model.py +++ /dev/null @@ -1,852 +0,0 @@ -# pytorch_diffusion + derived encoder decoder -import math -import torch -import torch.nn as nn -import numpy as np -from einops import rearrange -from typing import Optional, Any - -from ldm.modules.attention import MemoryEfficientCrossAttention - -try: - import xformers - import xformers.ops - XFORMERS_IS_AVAILBLE = True -except: - XFORMERS_IS_AVAILBLE = False - print("No module 'xformers'. 
Proceeding without it.") - - -def get_timestep_embedding(timesteps, embedding_dim): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: - From Fairseq. - Build sinusoidal embeddings. - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". - """ - assert len(timesteps.shape) == 1 - - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) - emb = emb.to(device=timesteps.device) - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0,1,0,0)) - return emb - - -def nonlinearity(x): - # swish - return x*torch.sigmoid(x) - - -def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) - - -class Upsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) - - def forward(self, x): - if self.with_conv: - pad = (0,1,0,1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) - return x - - -class ResnetBlock(nn.Module): - def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout, temb_channels=512): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, - out_channels) - self.norm2 = Normalize(out_channels) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) - - if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] - - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h = self.conv2(h) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) - - return x+h - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = 
in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = q.reshape(b,c,h*w) - q = q.permute(0,2,1) # b,hw,c - k = k.reshape(b,c,h*w) # b,c,hw - w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b,c,h*w) - w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b,c,h,w) - - h_ = self.proj_out(h_) - - return x+h_ - -class MemoryEfficientAttnBlock(nn.Module): - """ - Uses xformers efficient implementation, - see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223 - Note: this is a single-head self-attention operation - """ - # - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.attention_op: Optional[Any] = None - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - B, C, H, W = q.shape - q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v)) - - q, k, v = map( - lambda t: t.unsqueeze(3) - .reshape(B, t.shape[1], 1, C) - .permute(0, 2, 1, 3) - .reshape(B * 1, t.shape[1], C) - .contiguous(), - (q, k, v), - ) - out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op) - - out = ( - out.unsqueeze(0) - .reshape(B, 1, out.shape[1], C) - .permute(0, 2, 1, 3) - .reshape(B, out.shape[1], C) - ) - out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C) - out = self.proj_out(out) - return x+out - - -class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention): - def forward(self, x, context=None, mask=None): - b, c, h, w = x.shape - x = rearrange(x, 'b c h w -> b (h w) c') - out = super().forward(x, context=context, mask=mask) - out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w, c=c) - return x + out - - -def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): - assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown' - if XFORMERS_IS_AVAILBLE and attn_type == "vanilla": - attn_type = "vanilla-xformers" - print(f"making attention of type '{attn_type}' with {in_channels} in_channels") - if attn_type == "vanilla": - assert attn_kwargs is None - return AttnBlock(in_channels) - elif attn_type == "vanilla-xformers": - print(f"building MemoryEfficientAttnBlock 
with {in_channels} in_channels...") - return MemoryEfficientAttnBlock(in_channels) - elif attn_type == "memory-efficient-cross-attn": - attn_kwargs["query_dim"] = in_channels - return MemoryEfficientCrossAttentionWrapper(**attn_kwargs) - elif attn_type == "none": - return nn.Identity(in_channels) - else: - raise NotImplementedError() - - -class Model(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = self.ch*4 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - self.use_timestep = use_timestep - if self.use_timestep: - # timestep embedding - self.temb = nn.Module() - self.temb.dense = nn.ModuleList([ - torch.nn.Linear(self.ch, - self.temb_ch), - torch.nn.Linear(self.temb_ch, - self.temb_ch), - ]) - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - skip_in = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - if i_block == self.num_res_blocks: - skip_in = ch*in_ch_mult[i_level] - block.append(ResnetBlock(in_channels=block_in+skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x, t=None, context=None): - #assert x.shape[2] == x.shape[3] == self.resolution - if context is not None: - # assume aligned context, cat along channel axis - x = torch.cat((x, context), dim=1) - if self.use_timestep: - # timestep embedding - assert t is not None - temb =
get_timestep_embedding(t, self.ch) - temb = self.temb.dense[0](temb) - temb = nonlinearity(temb) - temb = self.temb.dense[1](temb) - else: - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - def get_last_layer(self): - return self.conv_out.weight - - -class Encoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", - **ignore_kwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.in_ch_mult = in_ch_mult - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - 2*z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # timestep embedding - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = 
self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class Decoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - attn_type="vanilla", **ignorekwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - self.tanh_out = tanh_out - - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,)+tuple(ch_mult) - block_in = ch*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - print("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, z): - #assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep embedding - temb = None - - # z to block_in - h = self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - if self.give_pre_end: - return h - - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - if self.tanh_out: - h = torch.tanh(h) - return h - - -class SimpleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, *args, **kwargs): - super().__init__() - self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock(in_channels=in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=2 * in_channels, - out_channels=4 * in_channels, - 
temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - nn.Conv2d(2*in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True)]) - # end - self.norm_out = Normalize(in_channels) - self.conv_out = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - for i, layer in enumerate(self.model): - if i in [1,2,3]: - x = layer(x, None) - else: - x = layer(x) - - h = self.norm_out(x) - h = nonlinearity(h) - x = self.conv_out(h) - return x - - -class UpsampleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, - ch_mult=(2,2), dropout=0.0): - super().__init__() - # upsampling - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - block_in = in_channels - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.res_blocks = nn.ModuleList() - self.upsample_blocks = nn.ModuleList() - for i_level in range(self.num_resolutions): - res_block = [] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - res_block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - self.res_blocks.append(nn.ModuleList(res_block)) - if i_level != self.num_resolutions - 1: - self.upsample_blocks.append(Upsample(block_in, True)) - curr_res = curr_res * 2 - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # upsampling - h = x - for k, i_level in enumerate(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.res_blocks[i_level][i_block](h, None) - if i_level != self.num_resolutions - 1: - h = self.upsample_blocks[k](h) - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class LatentRescaler(nn.Module): - def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): - super().__init__() - # residual block, interpolate, residual block - self.factor = factor - self.conv_in = nn.Conv2d(in_channels, - mid_channels, - kernel_size=3, - stride=1, - padding=1) - self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - self.attn = AttnBlock(mid_channels) - self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - - self.conv_out = nn.Conv2d(mid_channels, - out_channels, - kernel_size=1, - ) - - def forward(self, x): - x = self.conv_in(x) - for block in self.res_block1: - x = block(x, None) - x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) - x = self.attn(x) - for block in self.res_block2: - x = block(x, None) - x = self.conv_out(x) - return x - - -class MergedRescaleEncoder(nn.Module): - def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, - ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): - super().__init__() - intermediate_chn = ch * ch_mult[-1] - self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, - z_channels=intermediate_chn, double_z=False, 
resolution=resolution,
-                               attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
-                               out_ch=None)
-        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
-                                       mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
-
-    def forward(self, x):
-        x = self.encoder(x)
-        x = self.rescaler(x)
-        return x
-
-
-class MergedRescaleDecoder(nn.Module):
-    def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
-                 dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
-        super().__init__()
-        tmp_chn = z_channels*ch_mult[-1]
-        self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
-                               resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
-                               ch_mult=ch_mult, resolution=resolution, ch=ch)
-        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
-                                       out_channels=tmp_chn, depth=rescale_module_depth)
-
-    def forward(self, x):
-        x = self.rescaler(x)
-        x = self.decoder(x)
-        return x
-
-
-class Upsampler(nn.Module):
-    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
-        super().__init__()
-        assert out_size >= in_size
-        num_blocks = int(np.log2(out_size//in_size))+1
-        factor_up = 1.+ (out_size % in_size)
-        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
-        self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
-                                       out_channels=in_channels)
-        self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
-                               attn_resolutions=[], in_channels=None, ch=in_channels,
-                               ch_mult=[ch_mult for _ in range(num_blocks)])
-
-    def forward(self, x):
-        x = self.rescaler(x)
-        x = self.decoder(x)
-        return x
-
-
-class Resize(nn.Module):
-    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
-        super().__init__()
-        self.with_conv = learned
-        self.mode = mode
-        if self.with_conv:
-            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
-            raise NotImplementedError()
-            assert in_channels is not None
-            # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=4,
-                                        stride=2,
-                                        padding=1)
-
-    def forward(self, x, scale_factor=1.0):
-        if scale_factor==1.0:
-            return x
-        else:
-            x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
-        return x
diff --git a/spaces/RichardMB1217/blip/data/nocaps_dataset.py b/spaces/RichardMB1217/blip/data/nocaps_dataset.py
deleted file mode 100644
index ba0bed06d8af3dbaccf18a56e725f101e585503e..0000000000000000000000000000000000000000
--- a/spaces/RichardMB1217/blip/data/nocaps_dataset.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import os
-import json
-
-from torch.utils.data import Dataset
-from torchvision.datasets.utils import download_url
-
-from PIL import Image
-
-class nocaps_eval(Dataset):
-    def __init__(self, transform, image_root, ann_root, split):
-        urls = {'val':'https://storage.googleapis.com/sfr-vision-language-research/datasets/nocaps_val.json',
-                'test':'https://storage.googleapis.com/sfr-vision-language-research/datasets/nocaps_test.json'}
-        filenames = {'val':'nocaps_val.json','test':'nocaps_test.json'}
-
-        download_url(urls[split],ann_root)
-
-        self.annotation =
json.load(open(os.path.join(ann_root,filenames[split]),'r'))
-        self.transform = transform
-        self.image_root = image_root
-
-    def __len__(self):
-        return len(self.annotation)
-
-    def __getitem__(self, index):
-
-        ann = self.annotation[index]
-
-        image_path = os.path.join(self.image_root,ann['image'])
-        image = Image.open(image_path).convert('RGB')
-        image = self.transform(image)
-
-        return image, int(ann['img_id'])
\ No newline at end of file
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/fileio/parse.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/fileio/parse.py
deleted file mode 100644
index f60f0d611b8d75692221d0edd7dc993b0a6445c9..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/fileio/parse.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-
-from io import StringIO
-
-from .file_client import FileClient
-
-
-def list_from_file(filename,
-                   prefix='',
-                   offset=0,
-                   max_num=0,
-                   encoding='utf-8',
-                   file_client_args=None):
-    """Load a text file and parse the content as a list of strings.
-
-    Note:
-        In v1.3.16 and later, ``list_from_file`` supports loading a text file
-        which can be stored in different backends and parsing the content as
-        a list of strings.
-
-    Args:
-        filename (str): Filename.
-        prefix (str): The prefix to be inserted to the beginning of each item.
-        offset (int): The offset of lines.
-        max_num (int): The maximum number of lines to be read,
-            zeros and negatives mean no limitation.
-        encoding (str): Encoding used to open the file. Default utf-8.
-        file_client_args (dict, optional): Arguments to instantiate a
-            FileClient. See :class:`mmcv.fileio.FileClient` for details.
-            Default: None.
-
-    Examples:
-        >>> list_from_file('/path/of/your/file')  # disk
-        ['hello', 'world']
-        >>> list_from_file('s3://path/of/your/file')  # ceph or petrel
-        ['hello', 'world']
-
-    Returns:
-        list[str]: A list of strings.
-    """
-    cnt = 0
-    item_list = []
-    file_client = FileClient.infer_client(file_client_args, filename)
-    with StringIO(file_client.get_text(filename, encoding)) as f:
-        for _ in range(offset):
-            f.readline()
-        for line in f:
-            if 0 < max_num <= cnt:
-                break
-            item_list.append(prefix + line.rstrip('\n\r'))
-            cnt += 1
-    return item_list
-
-
-def dict_from_file(filename,
-                   key_type=str,
-                   encoding='utf-8',
-                   file_client_args=None):
-    """Load a text file and parse the content as a dict.
-
-    Each line of the text file will be two or more columns split by
-    whitespaces or tabs. The first column will be parsed as dict keys, and
-    the following columns will be parsed as dict values.
-
-    Note:
-        In v1.3.16 and later, ``dict_from_file`` supports loading a text file
-        which can be stored in different backends and parsing the content as
-        a dict.
-
-    Args:
-        filename (str): Filename.
-        key_type (type): Type of the dict keys. str is used by default and
-            type conversion will be performed if specified.
-        encoding (str): Encoding used to open the file. Default utf-8.
-        file_client_args (dict, optional): Arguments to instantiate a
-            FileClient. See :class:`mmcv.fileio.FileClient` for details.
-            Default: None.
-
-    Examples:
-        >>> dict_from_file('/path/of/your/file')  # disk
-        {'key1': 'value1', 'key2': 'value2'}
-        >>> dict_from_file('s3://path/of/your/file')  # ceph or petrel
-        {'key1': 'value1', 'key2': 'value2'}
-
-    Returns:
-        dict: The parsed contents.
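-            If a line has more than two columns, the value is a list of the
-            remaining columns; otherwise it is the single remaining column.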
- """ - mapping = {} - file_client = FileClient.infer_client(file_client_args, filename) - with StringIO(file_client.get_text(filename, encoding)) as f: - for line in f: - items = line.rstrip('\n').split() - assert len(items) >= 2 - key = key_type(items[0]) - val = items[1:] if len(items) > 2 else items[1] - mapping[key] = val - return mapping diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/runner/optimizer/builder.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/runner/optimizer/builder.py deleted file mode 100644 index f9234eed8f1f186d9d8dfda34562157ee39bdb3a..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/runner/optimizer/builder.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import inspect - -import torch - -from ...utils import Registry, build_from_cfg - -OPTIMIZERS = Registry('optimizer') -OPTIMIZER_BUILDERS = Registry('optimizer builder') - - -def register_torch_optimizers(): - torch_optimizers = [] - for module_name in dir(torch.optim): - if module_name.startswith('__'): - continue - _optim = getattr(torch.optim, module_name) - if inspect.isclass(_optim) and issubclass(_optim, - torch.optim.Optimizer): - OPTIMIZERS.register_module()(_optim) - torch_optimizers.append(module_name) - return torch_optimizers - - -TORCH_OPTIMIZERS = register_torch_optimizers() - - -def build_optimizer_constructor(cfg): - return build_from_cfg(cfg, OPTIMIZER_BUILDERS) - - -def build_optimizer(model, cfg): - optimizer_cfg = copy.deepcopy(cfg) - constructor_type = optimizer_cfg.pop('constructor', - 'DefaultOptimizerConstructor') - paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) - optim_constructor = build_optimizer_constructor( - dict( - type=constructor_type, - optimizer_cfg=optimizer_cfg, - paramwise_cfg=paramwise_cfg)) - optimizer = optim_constructor(model) - return optimizer diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/scnet.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/scnet.py deleted file mode 100644 index 04a2347c4ec1efcbfda59a134cddd8bde620d983..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/scnet.py +++ /dev/null @@ -1,10 +0,0 @@ -from ..builder import DETECTORS -from .cascade_rcnn import CascadeRCNN - - -@DETECTORS.register_module() -class SCNet(CascadeRCNN): - """Implementation of `SCNet `_""" - - def __init__(self, **kwargs): - super(SCNet, self).__init__(**kwargs) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/double_bbox_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/double_bbox_head.py deleted file mode 100644 index 6c154cb3c0d9d7639c3d4a2a1272406d3fab8acd..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/double_bbox_head.py +++ /dev/null @@ -1,172 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule, normal_init, xavier_init - -from mmdet.models.backbones.resnet import Bottleneck -from mmdet.models.builder import HEADS -from .bbox_head import BBoxHead - - -class BasicResBlock(nn.Module): - """Basic residual block. - - This block is a little different from the block in the ResNet backbone. 
-    The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
-
-    Args:
-        in_channels (int): Channels of the input feature map.
-        out_channels (int): Channels of the output feature map.
-        conv_cfg (dict): The config dict for convolution layers.
-        norm_cfg (dict): The config dict for normalization layers.
-    """
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN')):
-        super(BasicResBlock, self).__init__()
-
-        # main path
-        self.conv1 = ConvModule(
-            in_channels,
-            in_channels,
-            kernel_size=3,
-            padding=1,
-            bias=False,
-            conv_cfg=conv_cfg,
-            norm_cfg=norm_cfg)
-        self.conv2 = ConvModule(
-            in_channels,
-            out_channels,
-            kernel_size=1,
-            bias=False,
-            conv_cfg=conv_cfg,
-            norm_cfg=norm_cfg,
-            act_cfg=None)
-
-        # identity path
-        self.conv_identity = ConvModule(
-            in_channels,
-            out_channels,
-            kernel_size=1,
-            conv_cfg=conv_cfg,
-            norm_cfg=norm_cfg,
-            act_cfg=None)
-
-        self.relu = nn.ReLU(inplace=True)
-
-    def forward(self, x):
-        identity = x
-
-        x = self.conv1(x)
-        x = self.conv2(x)
-
-        identity = self.conv_identity(identity)
-        out = x + identity
-
-        out = self.relu(out)
-        return out
-
-
-@HEADS.register_module()
-class DoubleConvFCBBoxHead(BBoxHead):
-    r"""Bbox head used in Double-Head R-CNN
-
-    .. code-block:: none
-
-                                          /-> cls
-                      /-> shared convs ->
-                                          \-> reg
-        roi features
-                                          /-> cls
-                      \-> shared fc    ->
-                                          \-> reg
-    """  # noqa: W605
-
-    def __init__(self,
-                 num_convs=0,
-                 num_fcs=0,
-                 conv_out_channels=1024,
-                 fc_out_channels=1024,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='BN'),
-                 **kwargs):
-        kwargs.setdefault('with_avg_pool', True)
-        super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
-        assert self.with_avg_pool
-        assert num_convs > 0
-        assert num_fcs > 0
-        self.num_convs = num_convs
-        self.num_fcs = num_fcs
-        self.conv_out_channels = conv_out_channels
-        self.fc_out_channels = fc_out_channels
-        self.conv_cfg = conv_cfg
-        self.norm_cfg = norm_cfg
-
-        # increase the channel of input features
-        self.res_block = BasicResBlock(self.in_channels,
-                                       self.conv_out_channels)
-
-        # add conv heads
-        self.conv_branch = self._add_conv_branch()
-        # add fc heads
-        self.fc_branch = self._add_fc_branch()
-
-        out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
-        self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
-
-        self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
-        self.relu = nn.ReLU(inplace=True)
-
-    def _add_conv_branch(self):
-        """Add the conv branch, which consists of a sequence of conv layers."""
-        branch_convs = nn.ModuleList()
-        for i in range(self.num_convs):
-            branch_convs.append(
-                Bottleneck(
-                    inplanes=self.conv_out_channels,
-                    planes=self.conv_out_channels // 4,
-                    conv_cfg=self.conv_cfg,
-                    norm_cfg=self.norm_cfg))
-        return branch_convs
-
-    def _add_fc_branch(self):
-        """Add the fc branch, which consists of a sequence of fc layers."""
-        branch_fcs = nn.ModuleList()
-        for i in range(self.num_fcs):
-            fc_in_channels = (
-                self.in_channels *
-                self.roi_feat_area if i == 0 else self.fc_out_channels)
-            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
-        return branch_fcs
-
-    def init_weights(self):
-        # conv layers are already initialized by ConvModule
-        normal_init(self.fc_cls, std=0.01)
-        normal_init(self.fc_reg, std=0.001)
-
-        for m in self.fc_branch.modules():
-            if isinstance(m, nn.Linear):
-                xavier_init(m, distribution='uniform')
-
-    def forward(self, x_cls, x_reg):
-        # conv head
-        x_conv = self.res_block(x_reg)
-
-        for conv in self.conv_branch:
-            x_conv = conv(x_conv)
-
-        if
self.with_avg_pool: - x_conv = self.avg_pool(x_conv) - - x_conv = x_conv.view(x_conv.size(0), -1) - bbox_pred = self.fc_reg(x_conv) - - # fc head - x_fc = x_cls.view(x_cls.size(0), -1) - for fc in self.fc_branch: - x_fc = self.relu(fc(x_fc)) - - cls_score = self.fc_cls(x_fc) - - return cls_score, bbox_pred diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/hswish.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/hswish.py deleted file mode 100644 index 7e0c090ff037c99ee6c5c84c4592e87beae02208..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/hswish.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn - -from .registry import ACTIVATION_LAYERS - - -@ACTIVATION_LAYERS.register_module() -class HSwish(nn.Module): - """Hard Swish Module. - - This module applies the hard swish function: - - .. math:: - Hswish(x) = x * ReLU6(x + 3) / 6 - - Args: - inplace (bool): can optionally do the operation in-place. - Default: False. - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, inplace=False): - super(HSwish, self).__init__() - self.act = nn.ReLU6(inplace) - - def forward(self, x): - return x * self.act(x + 3) / 6 diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/masked_conv.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/masked_conv.py deleted file mode 100644 index cd514cc204c1d571ea5dc7e74b038c0f477a008b..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/masked_conv.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
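-# Masked 2d convolution: the convolution is only evaluated at spatial positions
-# where the binary mask is nonzero, via a masked im2col gather, a GEMM against the
-# flattened weights, and a masked col2im scatter back into the dense output.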
-import math
-
-import torch
-import torch.nn as nn
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-from torch.nn.modules.utils import _pair
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext(
-    '_ext', ['masked_im2col_forward', 'masked_col2im_forward'])
-
-
-class MaskedConv2dFunction(Function):
-
-    @staticmethod
-    def symbolic(g, features, mask, weight, bias, padding, stride):
-        return g.op(
-            'mmcv::MMCVMaskedConv2d',
-            features,
-            mask,
-            weight,
-            bias,
-            padding_i=padding,
-            stride_i=stride)
-
-    @staticmethod
-    def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
-        assert mask.dim() == 3 and mask.size(0) == 1
-        assert features.dim() == 4 and features.size(0) == 1
-        assert features.size()[2:] == mask.size()[1:]
-        pad_h, pad_w = _pair(padding)
-        stride_h, stride_w = _pair(stride)
-        if stride_h != 1 or stride_w != 1:
-            raise ValueError(
-                'masked_conv2d currently only supports stride 1.')
-        out_channel, in_channel, kernel_h, kernel_w = weight.size()
-
-        batch_size = features.size(0)
-        out_h = int(
-            math.floor((features.size(2) + 2 * pad_h -
-                        (kernel_h - 1) - 1) / stride_h + 1))
-        # use the kernel width (not height) for the output width
-        out_w = int(
-            math.floor((features.size(3) + 2 * pad_w -
-                        (kernel_w - 1) - 1) / stride_w + 1))
-        mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
-        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
-        if mask_inds.numel() > 0:
-            mask_h_idx = mask_inds[:, 0].contiguous()
-            mask_w_idx = mask_inds[:, 1].contiguous()
-            data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
-                                          mask_inds.size(0))
-            ext_module.masked_im2col_forward(
-                features,
-                mask_h_idx,
-                mask_w_idx,
-                data_col,
-                kernel_h=kernel_h,
-                kernel_w=kernel_w,
-                pad_h=pad_h,
-                pad_w=pad_w)
-
-            masked_output = torch.addmm(bias[:, None],
-                                        weight.view(out_channel, -1), data_col)
-            ext_module.masked_col2im_forward(
-                masked_output,
-                mask_h_idx,
-                mask_w_idx,
-                output,
-                height=out_h,
-                width=out_w,
-                channels=out_channel)
-        return output
-
-    @staticmethod
-    @once_differentiable
-    def backward(ctx, grad_output):
-        return (None, ) * 5
-
-
-masked_conv2d = MaskedConv2dFunction.apply
-
-
-class MaskedConv2d(nn.Conv2d):
-    """A MaskedConv2d which inherits the official Conv2d.
-
-    The masked forward does not implement a backward function and
-    currently only supports stride 1.
-    """
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride=1,
-                 padding=0,
-                 dilation=1,
-                 groups=1,
-                 bias=True):
-        super(MaskedConv2d,
-              self).__init__(in_channels, out_channels, kernel_size, stride,
-                             padding, dilation, groups, bias)
-
-    def forward(self, input, mask=None):
-        if mask is None:  # fallback to the normal Conv2d
-            return super(MaskedConv2d, self).forward(input)
-        else:
-            return masked_conv2d(input, mask, self.weight, self.bias,
-                                 self.padding)
diff --git a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/datasets/dataset_512_val.py b/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/datasets/dataset_512_val.py
deleted file mode 100644
index ef802863464db76ae79f7e06bdb9722b3525f0cf..0000000000000000000000000000000000000000
--- a/spaces/Rothfeld/stable-diffusion-mat-outpainting-primer/datasets/dataset_512_val.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto.
Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os -import numpy as np -import zipfile -import PIL.Image -import cv2 -import json -import torch -import dnnlib -import glob - -try: - import pyspng -except ImportError: - pyspng = None - -from datasets.mask_generator_512 import RandomMask - -#---------------------------------------------------------------------------- - -class Dataset(torch.utils.data.Dataset): - def __init__(self, - name, # Name of the dataset. - raw_shape, # Shape of the raw image data (NCHW). - max_size = None, # Artificially limit the size of the dataset. None = no limit. Applied before xflip. - use_labels = False, # Enable conditioning labels? False = label dimension is zero. - xflip = False, # Artificially double the size of the dataset via x-flips. Applied after max_size. - random_seed = 0, # Random seed to use when applying max_size. - ): - self._name = name - self._raw_shape = list(raw_shape) - self._use_labels = use_labels - self._raw_labels = None - self._label_shape = None - - # Apply max_size. - self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64) - if (max_size is not None) and (self._raw_idx.size > max_size): - np.random.RandomState(random_seed).shuffle(self._raw_idx) - self._raw_idx = np.sort(self._raw_idx[:max_size]) - - # Apply xflip. - self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8) - if xflip: - self._raw_idx = np.tile(self._raw_idx, 2) - self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)]) - - def _get_raw_labels(self): - if self._raw_labels is None: - self._raw_labels = self._load_raw_labels() if self._use_labels else None - if self._raw_labels is None: - self._raw_labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32) - assert isinstance(self._raw_labels, np.ndarray) - assert self._raw_labels.shape[0] == self._raw_shape[0] - assert self._raw_labels.dtype in [np.float32, np.int64] - if self._raw_labels.dtype == np.int64: - assert self._raw_labels.ndim == 1 - assert np.all(self._raw_labels >= 0) - return self._raw_labels - - def close(self): # to be overridden by subclass - pass - - def _load_raw_image(self, raw_idx): # to be overridden by subclass - raise NotImplementedError - - def _load_raw_labels(self): # to be overridden by subclass - raise NotImplementedError - - def __getstate__(self): - return dict(self.__dict__, _raw_labels=None) - - def __del__(self): - try: - self.close() - except: - pass - - def __len__(self): - return self._raw_idx.size - - def __getitem__(self, idx): - image = self._load_raw_image(self._raw_idx[idx]) - assert isinstance(image, np.ndarray) - assert list(image.shape) == self.image_shape - assert image.dtype == np.uint8 - if self._xflip[idx]: - assert image.ndim == 3 # CHW - image = image[:, :, ::-1] - return image.copy(), self.get_label(idx) - - def get_label(self, idx): - label = self._get_raw_labels()[self._raw_idx[idx]] - if label.dtype == np.int64: - onehot = np.zeros(self.label_shape, dtype=np.float32) - onehot[label] = 1 - label = onehot - return label.copy() - - def get_details(self, idx): - d = dnnlib.EasyDict() - d.raw_idx = int(self._raw_idx[idx]) - d.xflip = (int(self._xflip[idx]) != 0) - d.raw_label = self._get_raw_labels()[d.raw_idx].copy() - return d - - @property - def name(self): - return self._name - - @property - def image_shape(self): - return list(self._raw_shape[1:]) - - @property - def num_channels(self): - assert 
len(self.image_shape) == 3 # CHW - return self.image_shape[0] - - @property - def resolution(self): - assert len(self.image_shape) == 3 # CHW - assert self.image_shape[1] == self.image_shape[2] - return self.image_shape[1] - - @property - def label_shape(self): - if self._label_shape is None: - raw_labels = self._get_raw_labels() - if raw_labels.dtype == np.int64: - self._label_shape = [int(np.max(raw_labels)) + 1] - else: - self._label_shape = raw_labels.shape[1:] - return list(self._label_shape) - - @property - def label_dim(self): - assert len(self.label_shape) == 1 - return self.label_shape[0] - - @property - def has_labels(self): - return any(x != 0 for x in self.label_shape) - - @property - def has_onehot_labels(self): - return self._get_raw_labels().dtype == np.int64 - - -#---------------------------------------------------------------------------- - - -class ImageFolderMaskDataset(Dataset): - def __init__(self, - path, # Path to directory or zip. - resolution = None, # Ensure specific resolution, None = highest available. - hole_range=[0,1], - **super_kwargs, # Additional arguments for the Dataset base class. - ): - self._path = path - self._zipfile = None - self._hole_range = hole_range - - if os.path.isdir(self._path): - self._type = 'dir' - self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files} - elif self._file_ext(self._path) == '.zip': - self._type = 'zip' - self._all_fnames = set(self._get_zipfile().namelist()) - else: - raise IOError('Path must point to a directory or zip') - - PIL.Image.init() - self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION) - if len(self._image_fnames) == 0: - raise IOError('No image files found in the specified path') - - name = os.path.splitext(os.path.basename(self._path))[0] - raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape) - if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution): - raise IOError('Image files do not match the specified resolution') - self._load_mask() - super().__init__(name=name, raw_shape=raw_shape, **super_kwargs) - - def _load_mask(self, mpath='/data/liwenbo/datasets/Places365/standard/masks_val_512_eval'): - self.masks = sorted(glob.glob(mpath + '/*.png')) - - @staticmethod - def _file_ext(fname): - return os.path.splitext(fname)[1].lower() - - def _get_zipfile(self): - assert self._type == 'zip' - if self._zipfile is None: - self._zipfile = zipfile.ZipFile(self._path) - return self._zipfile - - def _open_file(self, fname): - if self._type == 'dir': - return open(os.path.join(self._path, fname), 'rb') - if self._type == 'zip': - return self._get_zipfile().open(fname, 'r') - return None - - def close(self): - try: - if self._zipfile is not None: - self._zipfile.close() - finally: - self._zipfile = None - - def __getstate__(self): - return dict(super().__getstate__(), _zipfile=None) - - def _load_raw_image(self, raw_idx): - fname = self._image_fnames[raw_idx] - with self._open_file(fname) as f: - if pyspng is not None and self._file_ext(fname) == '.png': - image = pyspng.load(f.read()) - else: - image = np.array(PIL.Image.open(f)) - if image.ndim == 2: - image = image[:, :, np.newaxis] # HW => HWC - - # for grayscale image - if image.shape[2] == 1: - image = np.repeat(image, 3, axis=2) - - # restricted to 512x512 - res = 512 - H, W, C = image.shape - if H < res or W < res: - top = 0 - bottom = max(0, res - H) - left 
= 0 - right = max(0, res - W) - image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_REFLECT) - H, W, C = image.shape - h = (H - res) // 2 - w = (W - res) // 2 - image = image[h:h+res, w:w+res, :] - - image = np.ascontiguousarray(image.transpose(2, 0, 1)) # HWC => CHW - return image - - def _load_raw_labels(self): - fname = 'labels.json' - if fname not in self._all_fnames: - return None - with self._open_file(fname) as f: - labels = json.load(f)['labels'] - if labels is None: - return None - labels = dict(labels) - labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames] - labels = np.array(labels) - labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim]) - return labels - - def __getitem__(self, idx): - image = self._load_raw_image(self._raw_idx[idx]) - - # for grayscale image - if image.shape[0] == 1: - image = np.repeat(image, 3, axis=0) - - assert isinstance(image, np.ndarray) - assert list(image.shape) == self.image_shape - assert image.dtype == np.uint8 - if self._xflip[idx]: - assert image.ndim == 3 # CHW - image = image[:, :, ::-1] - # mask = RandomMask(image.shape[-1], hole_range=self._hole_range) # hole as 0, reserved as 1 - mask = cv2.imread(self.masks[idx], cv2.IMREAD_GRAYSCALE).astype(np.float32)[np.newaxis, :, :] / 255.0 - return image.copy(), mask, self.get_label(idx) diff --git a/spaces/Ryukijano/Ryukijano-controlnet-fill-circle/app.py b/spaces/Ryukijano/Ryukijano-controlnet-fill-circle/app.py deleted file mode 100644 index 0e4962990ab1bebe681e3e5c2dbf6ca37ba87f8a..0000000000000000000000000000000000000000 --- a/spaces/Ryukijano/Ryukijano-controlnet-fill-circle/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import gradio as gr -from transformers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline, AutoTokenizer - -def load_model(model_name): - tokenizer = AutoTokenizer.from_pretrained(model_name) - controlnet = FlaxControlNetModel.from_pretrained(model_name) - pipeline = FlaxStableDiffusionControlNetPipeline.from_pretrained(model_name) - return tokenizer, controlnet, pipeline - -model_name = "Ryukijano/controlnet-fill-circle" -tokenizer, controlnet, pipeline = load_model(model_name) - -def infer_fill_circle(prompt, image): - # Your inference function for fill circle control - inputs = tokenizer(prompt, return_tensors="jax") - # Implement your image preprocessing here - outputs = pipeline.generate(inputs, image) - return outputs - -with gr.Blocks(theme='gradio/soft') as demo: - gr.Markdown("## Stable Diffusion with Fill Circle Control") - gr.Markdown("In this app, you can find the ControlNet with Fill Circle control.") - - with gr.Tab("ControlNet Fill Circle"): - prompt_input_fill_circle = gr.Textbox(label="Prompt") - negative_prompt_fill_circle = gr.Textbox(label="Negative Prompt") - fill_circle_input = gr.Image(label="Input Image") - fill_circle_output = gr.Image(label="Output Image") - submit_btn = gr.Button(value="Submit") - fill_circle_inputs = [prompt_input_fill_circle, fill_circle_input] - submit_btn.click(fn=infer_fill_circle, inputs=fill_circle_inputs, outputs=[fill_circle_output]) - -demo.launch() - diff --git a/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/text/symbols.py b/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/text/symbols.py deleted file mode 100644 index 053a7105f7ce95aa51614f6995399fa2172b3eb2..0000000000000000000000000000000000000000 --- a/spaces/Sakukaze/VITS-Umamusume-voice-synthesizer/text/symbols.py +++ /dev/null @@ -1,76 +0,0 @@ -''' -Defines the set of symbols used in text input to the 
model. -''' - -# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' - - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -'''# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' -''' - -'''# sanskrit_cleaners -_pad = '_' -_punctuation = '।' -_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' -''' - -'''# cjks_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' -''' - -'''# thai_cleaners -_pad = '_' -_punctuation = '.!? ' -_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' -''' - -'''# cjke_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' -''' - -'''# shanghainese_cleaners -_pad = '_' -_punctuation = ',.!?…' -_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' -''' - -'''# chinese_dialect_cleaners -_pad = '_' -_punctuation = ',.!?~…─' -_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ ' -''' - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") diff --git a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/train_sppe/src/train.py b/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/train_sppe/src/train.py deleted file mode 100644 index 82a9258a8782ae3760dbd3a19590adad382011d6..0000000000000000000000000000000000000000 --- a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/train_sppe/src/train.py +++ /dev/null @@ -1,210 +0,0 @@ -# ----------------------------------------------------- -# Copyright (c) Shanghai Jiao Tong University. All rights reserved. 
-# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com) -# ----------------------------------------------------- - -import torch -import torch.utils.data -from .utils.dataset import coco -from opt import opt -from tqdm import tqdm -from models.FastPose import createModel -from .utils.eval import DataLogger, accuracy -from .utils.img import flip, shuffleLR -from .evaluation import prediction - -from tensorboardX import SummaryWriter -import os - - -def train(train_loader, m, criterion, optimizer, writer): - lossLogger = DataLogger() - accLogger = DataLogger() - m.train() - - train_loader_desc = tqdm(train_loader) - - for i, (inps, labels, setMask, imgset) in enumerate(train_loader_desc): - inps = inps.requires_grad_() - labels = labels - setMask = setMask - out = m(inps) - - loss = criterion(out.mul(setMask), labels) - - acc = accuracy(out.data.mul(setMask), labels.data, train_loader.dataset) - - accLogger.update(acc[0], inps.size(0)) - lossLogger.update(loss.item(), inps.size(0)) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - opt.trainIters += 1 - # Tensorboard - writer.add_scalar( - 'Train/Loss', lossLogger.avg, opt.trainIters) - writer.add_scalar( - 'Train/Acc', accLogger.avg, opt.trainIters) - - # TQDM - train_loader_desc.set_description( - 'loss: {loss:.8f} | acc: {acc:.2f}'.format( - loss=lossLogger.avg, - acc=accLogger.avg * 100) - ) - - train_loader_desc.close() - - return lossLogger.avg, accLogger.avg - - -def valid(val_loader, m, criterion, optimizer, writer): - lossLogger = DataLogger() - accLogger = DataLogger() - m.eval() - - val_loader_desc = tqdm(val_loader) - - for i, (inps, labels, setMask, imgset) in enumerate(val_loader_desc): - inps = inps - labels = labels - setMask = setMask - - with torch.no_grad(): - out = m(inps) - - loss = criterion(out.mul(setMask), labels) - - flip_out = m(flip(inps)) - flip_out = flip(shuffleLR(flip_out, val_loader.dataset)) - - out = (flip_out + out) / 2 - - acc = accuracy(out.mul(setMask), labels, val_loader.dataset) - - lossLogger.update(loss.item(), inps.size(0)) - accLogger.update(acc[0], inps.size(0)) - - opt.valIters += 1 - - # Tensorboard - writer.add_scalar( - 'Valid/Loss', lossLogger.avg, opt.valIters) - writer.add_scalar( - 'Valid/Acc', accLogger.avg, opt.valIters) - - val_loader_desc.set_description( - 'loss: {loss:.8f} | acc: {acc:.2f}'.format( - loss=lossLogger.avg, - acc=accLogger.avg * 100) - ) - - val_loader_desc.close() - - return lossLogger.avg, accLogger.avg - - -def main(): - - # Model Initialize - m = createModel() - if opt.loadModel: - print('Loading Model from {}'.format(opt.loadModel)) - m.load_state_dict(torch.load(opt.loadModel, map_location=torch.device('cpu'))) - if not os.path.exists("../exp/{}/{}".format(opt.dataset, opt.expID)): - try: - os.mkdir("../exp/{}/{}".format(opt.dataset, opt.expID)) - except FileNotFoundError: - os.mkdir("../exp/{}".format(opt.dataset)) - os.mkdir("../exp/{}/{}".format(opt.dataset, opt.expID)) - else: - print('Create new model') - if not os.path.exists("../exp/{}/{}".format(opt.dataset, opt.expID)): - try: - os.mkdir("../exp/{}/{}".format(opt.dataset, opt.expID)) - except FileNotFoundError: - os.mkdir("../exp/{}".format(opt.dataset)) - os.mkdir("../exp/{}/{}".format(opt.dataset, opt.expID)) - - criterion = torch.nn.MSELoss() - - if opt.optMethod == 'rmsprop': - optimizer = torch.optim.RMSprop(m.parameters(), - lr=opt.LR, - momentum=opt.momentum, - weight_decay=opt.weightDecay) - elif opt.optMethod == 'adam': - optimizer = torch.optim.Adam( - m.parameters(), - lr=opt.LR - ) 
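-    # Only 'rmsprop' and 'adam' are supported; any other opt.optMethod fails loudly.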
-    else:
-        raise ValueError('Unsupported optimizer method: {}'.format(opt.optMethod))
-
-    writer = SummaryWriter(
-        '.tensorboard/{}/{}'.format(opt.dataset, opt.expID))
-
-    # Prepare Dataset
-    if opt.dataset == 'coco':
-        train_dataset = coco.Mscoco(train=True)
-        val_dataset = coco.Mscoco(train=False)
-
-    train_loader = torch.utils.data.DataLoader(
-        train_dataset, batch_size=opt.trainBatch, shuffle=True, num_workers=opt.nThreads, pin_memory=True)
-
-    val_loader = torch.utils.data.DataLoader(
-        val_dataset, batch_size=opt.validBatch, shuffle=False, num_workers=opt.nThreads, pin_memory=True)
-
-    # Model Transfer
-    m = torch.nn.DataParallel(m)
-
-    # Start Training
-    for i in range(opt.nEpochs):
-        opt.epoch = i
-
-        print('############# Starting Epoch {} #############'.format(opt.epoch))
-        loss, acc = train(train_loader, m, criterion, optimizer, writer)
-
-        print('Train-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
-            idx=opt.epoch,
-            loss=loss,
-            acc=acc
-        ))
-
-        opt.acc = acc
-        opt.loss = loss
-        m_dev = m.module
-        if i % opt.snapshot == 0:
-            torch.save(
-                m_dev.state_dict(), '../exp/{}/{}/model_{}.pkl'.format(opt.dataset, opt.expID, opt.epoch))
-            torch.save(
-                opt, '../exp/{}/{}/option.pkl'.format(opt.dataset, opt.expID))
-            torch.save(
-                optimizer, '../exp/{}/{}/optimizer.pkl'.format(opt.dataset, opt.expID))
-
-        loss, acc = valid(val_loader, m, criterion, optimizer, writer)
-
-        print('Valid-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
-            idx=i,
-            loss=loss,
-            acc=acc
-        ))
-
-        '''
-        if opt.dataset != 'mpii':
-            with torch.no_grad():
-                mAP, mAP5 = prediction(m)
-
-            print('Prediction-{idx:d} epoch | mAP:{mAP:.3f} | mAP0.5:{mAP5:.3f}'.format(
-                idx=i,
-                mAP=mAP,
-                mAP5=mAP5
-            ))
-        '''
-    writer.close()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/spaces/Saturdays/ClassificationPeripheralBloodCell/main_page.py b/spaces/Saturdays/ClassificationPeripheralBloodCell/main_page.py
deleted file mode 100644
index 81262c37e7d87b63c80652350385c81bca07c32d..0000000000000000000000000000000000000000
--- a/spaces/Saturdays/ClassificationPeripheralBloodCell/main_page.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Dec 27 16:16:06 2022
-
-@author: Usuario
-"""
-import streamlit as st
-import imagen_subida as ims
-from keras.models import load_model
-from os import system
-# Load the model
-import os
-import patoolib
-from shutil import rmtree
-from os import remove
-
-"""
-Commented out because 7z is unavailable; the decompressed file is uploaded directly.
-if os.path.isdir("./model_subir/model") == True:
-    rmtree("./model_subir/model/")
-if os.path.isfile("./model_subir/test_model.zip") == True:
-    remove("./model_subir/test_model.zip")
-os.system("cat ./model_subir/vgg19_trainable_true_best_model_pruebita.7z.* > ./model_subir/test_model.zip")
-
-
-patoolib.extract_archive("./model_subir/test_model.zip",outdir="./model_subir/model/")
-#model = load_model('../../../model/classification/vgg19_trainable_true_best_model.h5')
-
-"""
-
-
-model = load_model('./model_subir/model/vgg19_trainable_true_best_model.h5')
-
-size = (224, 224)
-
-def main_page(clicked, label_names):
-    title = ims.change_title(clicked)
-    labs = ims.change_labels(clicked)
-    column1, column2 = st.columns(2)
-    holder_up = st.empty()
-
-    with column1:
-        st.write('')
-        uploaded_image = holder_up.file_uploader('')
-
-    holder_add_text = st.empty()
-    with column2:
-        additional_text = ''  #holder_add_text.write('In order to estimate which is the classification of your image, drop your file at the left')
-
-    if uploaded_image:
-        #container = st.container()
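-        # Once an image is uploaded: show the mode-specific explanation, run the
-        # classifier and display its results, then clear the upload and text placeholders.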
- add_tex = ims.additional_text_chart(clicked) # - st.write(add_tex) - result_texts = ims.result_text(clicked) - ims.resultados(uploaded_image, model, size, label_names, labs, result_texts) - #container.markdown(res) - holder_up.empty() - holder_add_text.empty() \ No newline at end of file diff --git a/spaces/SeViLA/SeViLA/lavis/datasets/datasets/video_vqa_datasets.py b/spaces/SeViLA/SeViLA/lavis/datasets/datasets/video_vqa_datasets.py deleted file mode 100644 index d1bd0a61c3b12e5f41fc386cff958c1c1830075f..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/datasets/datasets/video_vqa_datasets.py +++ /dev/null @@ -1,62 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import json -import os -from collections import OrderedDict - -from lavis.datasets.datasets.multimodal_classification_datasets import ( - MultimodalClassificationDataset, -) - - -class __DisplMixin: - def displ_item(self, index): - ann = self.annotation[index] - vname = ann["video"] - vpath = os.path.join(self.vis_root, vname) - - return OrderedDict( - {"file": vpath, "question": ann["question"], "answer": ann["answer"]} - ) - - -class VideoQADataset(MultimodalClassificationDataset, __DisplMixin): - def __init__(self, vis_processor, text_processor, vis_root, ann_paths): - super().__init__(vis_processor, text_processor, vis_root, ann_paths) - - def _build_class_labels(self, ans_path): - ans2label = json.load(open(ans_path)) - - self.class_labels = ans2label - - def _get_answer_label(self, answer): - if answer in self.class_labels: - return self.class_labels[answer] - else: - return len(self.class_labels) - - def __getitem__(self, index): - assert ( - self.class_labels - ), f"class_labels of {__class__.__name__} is not built yet." - - ann = self.annotation[index] - - vname = ann["video"] - vpath = os.path.join(self.vis_root, vname) - - frms = self.vis_processor(vpath) - question = self.text_processor(ann["question"]) - - return { - "video": frms, - "text_input": question, - "answers": self._get_answer_label(ann["answer"]), - "question_id": ann["question_id"], - "instance_id": ann["instance_id"], - } diff --git a/spaces/ServerX/PorcoDiaz/julius/resample.py b/spaces/ServerX/PorcoDiaz/julius/resample.py deleted file mode 100644 index fd3b9b547d4c33ec7136d32e5f086420d0a72e14..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/julius/resample.py +++ /dev/null @@ -1,216 +0,0 @@ -# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. -# Author: adefossez, 2020 -""" -Differentiable, Pytorch based resampling. -Implementation of Julius O. Smith algorithm for resampling. -See https://ccrma.stanford.edu/~jos/resample/ for details. -This implementation is specially optimized for when new_sr / old_sr is a fraction -with a small numerator and denominator when removing the gcd (e.g. new_sr = 700, old_sr = 500). - -Very similar to [bmcfee/resampy](https://github.com/bmcfee/resampy) except this implementation -is optimized for the case mentioned before, while resampy is slower but more general. - -""" - -import math -from typing import Optional - -import torch -from torch.nn import functional as F - -from .core import sinc -from .utils import simple_repr - - -class ResampleFrac(torch.nn.Module): - """ - Resampling from the sample rate `old_sr` to `new_sr`. 
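-    Uses a bank of windowed-sinc FIR filters, one per output phase (``new_sr``
-    filters after GCD reduction), applied with a strided convolution; see the
-    comments in ``_init_kernels`` for the derivation.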
- """ - def __init__(self, old_sr: int, new_sr: int, zeros: int = 24, rolloff: float = 0.945): - """ - Args: - old_sr (int): sample rate of the input signal x. - new_sr (int): sample rate of the output. - zeros (int): number of zero crossing to keep in the sinc filter. - rolloff (float): use a lowpass filter that is `rolloff * new_sr / 2`, - to ensure sufficient margin due to the imperfection of the FIR filter used. - Lowering this value will reduce anti-aliasing, but will reduce some of the - highest frequencies. - - Shape: - - - Input: `[*, T]` - - Output: `[*, T']` with `T' = int(new_sr * T / old_sr) - - - .. caution:: - After dividing `old_sr` and `new_sr` by their GCD, both should be small - for this implementation to be fast. - - >>> import torch - >>> resample = ResampleFrac(4, 5) - >>> x = torch.randn(1000) - >>> print(len(resample(x))) - 1250 - """ - super().__init__() - if not isinstance(old_sr, int) or not isinstance(new_sr, int): - raise ValueError("old_sr and new_sr should be integers") - gcd = math.gcd(old_sr, new_sr) - self.old_sr = old_sr // gcd - self.new_sr = new_sr // gcd - self.zeros = zeros - self.rolloff = rolloff - - self._init_kernels() - - def _init_kernels(self): - if self.old_sr == self.new_sr: - return - - kernels = [] - sr = min(self.new_sr, self.old_sr) - # rolloff will perform antialiasing filtering by removing the highest frequencies. - # At first I thought I only needed this when downsampling, but when upsampling - # you will get edge artifacts without this, the edge is equivalent to zero padding, - # which will add high freq artifacts. - sr *= self.rolloff - - # The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor) - # using the sinc interpolation formula: - # x(t) = sum_i x[i] sinc(pi * old_sr * (i / old_sr - t)) - # We can then sample the function x(t) with a different sample rate: - # y[j] = x(j / new_sr) - # or, - # y[j] = sum_i x[i] sinc(pi * old_sr * (i / old_sr - j / new_sr)) - - # We see here that y[j] is the convolution of x[i] with a specific filter, for which - # we take an FIR approximation, stopping when we see at least `zeros` zeros crossing. - # But y[j+1] is going to have a different set of weights and so on, until y[j + new_sr]. - # Indeed: - # y[j + new_sr] = sum_i x[i] sinc(pi * old_sr * ((i / old_sr - (j + new_sr) / new_sr)) - # = sum_i x[i] sinc(pi * old_sr * ((i - old_sr) / old_sr - j / new_sr)) - # = sum_i x[i + old_sr] sinc(pi * old_sr * (i / old_sr - j / new_sr)) - # so y[j+new_sr] uses the same filter as y[j], but on a shifted version of x by `old_sr`. - # This will explain the F.conv1d after, with a stride of old_sr. - self._width = math.ceil(self.zeros * self.old_sr / sr) - # If old_sr is still big after GCD reduction, most filters will be very unbalanced, i.e., - # they will have a lot of almost zero values to the left or to the right... - # There is probably a way to evaluate those filters more efficiently, but this is kept for - # future work. - idx = torch.arange(-self._width, self._width + self.old_sr).float() - for i in range(self.new_sr): - t = (-i/self.new_sr + idx/self.old_sr) * sr - t = t.clamp_(-self.zeros, self.zeros) - t *= math.pi - window = torch.cos(t/self.zeros/2)**2 - kernel = sinc(t) * window - # Renormalize kernel to ensure a constant signal is preserved. 
-            kernel.div_(kernel.sum())
-            kernels.append(kernel)
-
-        self.register_buffer("kernel", torch.stack(kernels).view(self.new_sr, 1, -1))
-
-    def forward(self, x: torch.Tensor, output_length: Optional[int] = None, full: bool = False):
-        """
-        Resample x.
-        Args:
-            x (Tensor): signal to resample, time should be the last dimension
-            output_length (None or int): This can be set to the desired output length
-                (last dimension). Allowed values are between 0 and
-                ceil(length * new_sr / old_sr). When None (default) is specified, the
-                floored output length will be used. In order to select the largest possible
-                size, use the `full` argument.
-            full (bool): return the longest possible output from the input. This can be useful
-                if you chain resampling operations, and want to give the `output_length` only
-                for the last one, while passing `full=True` to all the other ones.
-        """
-        if self.old_sr == self.new_sr:
-            return x
-        shape = x.shape
-        length = x.shape[-1]
-        x = x.reshape(-1, length)
-        x = F.pad(x[:, None], (self._width, self._width + self.old_sr), mode='replicate')
-        ys = F.conv1d(x, self.kernel, stride=self.old_sr)  # type: ignore
-        y = ys.transpose(1, 2).reshape(list(shape[:-1]) + [-1])
-
-        float_output_length = self.new_sr * length / self.old_sr
-        max_output_length = int(math.ceil(float_output_length))
-        default_output_length = int(float_output_length)
-        if output_length is None:
-            output_length = max_output_length if full else default_output_length
-        elif output_length < 0 or output_length > max_output_length:
-            raise ValueError(f"output_length must be between 0 and {max_output_length}")
-        else:
-            if full:
-                raise ValueError("You cannot pass both full=True and output_length")
-        return y[..., :output_length]
-
-    def __repr__(self):
-        return simple_repr(self)
-
-
-def resample_frac(x: torch.Tensor, old_sr: int, new_sr: int,
-                  zeros: int = 24, rolloff: float = 0.945,
-                  output_length: Optional[int] = None, full: bool = False):
-    """
-    Functional version of `ResampleFrac`, refer to its documentation for more information.
-
-    .. warning::
-        If you call this function repeatedly with the same sample rates, the
-        resampling kernel will be recomputed every time. For best performance, you should use
-        and cache an instance of `ResampleFrac`.
-    """
-    return ResampleFrac(old_sr, new_sr, zeros, rolloff).to(x)(x, output_length, full)
-
-
-# Easier implementations for downsampling and upsampling by a factor of 2
-# Kept for testing and reference
-
-def _kernel_upsample2_downsample2(zeros):
-    # Kernel for upsampling and downsampling by a factor of 2. Interestingly,
-    # it is the same kernel used for both.
-    win = torch.hann_window(4 * zeros + 1, periodic=False)
-    winodd = win[1::2]
-    t = torch.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros)
-    t *= math.pi
-    kernel = (sinc(t) * winodd).view(1, 1, -1)
-    return kernel
-
-
-def _upsample2(x, zeros=24):
-    """
-    Upsample x by a factor of two. The output will be exactly twice as long as the input.
-    Args:
-        x (Tensor): signal to upsample, time should be the last dimension
-        zeros (int): number of zero crossing to keep in the sinc filter.
-
-    This function is kept only for reference; you should use the more generic `resample_frac`
-    instead. It does not perform anti-aliasing filtering.
- """ - *other, time = x.shape - kernel = _kernel_upsample2_downsample2(zeros).to(x) - out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(*other, time) - y = torch.stack([x, out], dim=-1) - return y.view(*other, -1) - - -def _downsample2(x, zeros=24): - """ - Downsample x by a factor of two. The output length is half of the input, ceiled. - Args: - x (Tensor): signal to downsample, time should be the last dimension - zeros (int): number of zero crossing to keep in the sinc filter. - - This function is kept only for reference, you should use the more generic `resample_frac` - one. This function does not perform anti-aliasing filtering. - """ - if x.shape[-1] % 2 != 0: - x = F.pad(x, (0, 1)) - xeven = x[..., ::2] - xodd = x[..., 1::2] - *other, time = xodd.shape - kernel = _kernel_upsample2_downsample2(zeros).to(x) - out = xeven + F.conv1d(xodd.view(-1, 1, time), kernel, padding=zeros)[..., :-1].view( - *other, time) - return out.view(*other, -1).mul(0.5) diff --git a/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/version.py b/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/version.py deleted file mode 100644 index b794fd409a5e3b3b65ad76a43d6a01a318877640..0000000000000000000000000000000000000000 --- a/spaces/ShilongLiu/Grounding_DINO_demo/groundingdino/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.1.0' diff --git a/spaces/Slep/CondViT-LRVSF-Demo/src/model.py b/spaces/Slep/CondViT-LRVSF-Demo/src/model.py deleted file mode 100644 index 541c11b0e3a839a2660775139f4be8939d061d60..0000000000000000000000000000000000000000 --- a/spaces/Slep/CondViT-LRVSF-Demo/src/model.py +++ /dev/null @@ -1,213 +0,0 @@ -categories = [ - "Bags", - "Feet", - "Hands", - "Head", - "Lower Body", - "Neck", - "Outwear", - "Upper Body", - "Waist", - "Whole Body", -] - -import torch -from torch import nn - -from collections import OrderedDict -import logging - -logger = logging.getLogger(__name__) - - -class LayerNorm(nn.LayerNorm): - """Subclass torch's LayerNorm to handle fp16.""" - - def forward(self, x: torch.Tensor): - if self.weight.dtype != x.dtype: - orig_type = x.dtype - ret = super().forward(x.type(self.weight.dtype)) - return ret.type(orig_type) - else: - return super().forward(x) - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor): - return x * torch.sigmoid(1.702 * x) - - -class ResidualAttentionBlock(nn.Module): - def __init__( - self, - d_model: int, - n_head: int, - attn_mask: torch.Tensor = None, - ): - super().__init__() - - self.attn = nn.MultiheadAttention(d_model, n_head) - self.ln_1 = LayerNorm(d_model) - self.mlp = nn.Sequential( - OrderedDict( - [ - ( - "c_fc", - nn.Linear(d_model, d_model * 4), - ), - ("gelu", QuickGELU()), - ( - "c_proj", - nn.Linear(d_model * 4, d_model), - ), - ] - ) - ) - self.ln_2 = LayerNorm(d_model) - self.attn_mask = attn_mask - - def attention(self, x: torch.Tensor): - self.attn_mask = ( - self.attn_mask.to(dtype=x.dtype, device=x.device) - if self.attn_mask is not None - else None - ) - return self.attn( - x, - x, - x, - need_weights=False, - attn_mask=self.attn_mask, - )[0] - - def forward(self, x: torch.Tensor): - x = x + self.attention(self.ln_1(x)) - x = x + self.mlp(self.ln_2(x)) - return x - - -class Transformer(nn.Module): - def __init__( - self, - width: int, - layers: int, - heads: int, - attn_mask: torch.Tensor = None, - ): - super().__init__() - self.width = width - self.layers = layers - self.resblocks = nn.Sequential( - *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)] - ) - - 
def forward(self, x: torch.Tensor): - return self.resblocks(x) - - -class ConditionalViT(nn.Module): - def __init__( - self, - input_resolution: int, - patch_size: int, - width: int, - layers: int, - heads: int, - output_dim: int, - n_categories: int = None, - **kwargs, - ): - if kwargs: - logger.warning(f"Got unused kwargs : {kwargs}") - - super().__init__() - self.input_resolution = input_resolution - self.output_dim = output_dim - self.conv1 = nn.Conv2d( - in_channels=3, - out_channels=width, - kernel_size=patch_size, - stride=patch_size, - bias=False, - ) - - scale = width**-0.5 - - self.class_embedding = nn.Parameter(scale * torch.randn(width)) - - self.n_categories = n_categories - if self.n_categories: - self.c_embedding = nn.Embedding(self.n_categories, width) - self.c_pos_embedding = nn.Parameter(scale * torch.randn(1, width)) - - self.positional_embedding = nn.Parameter( - scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width) - ) - self.ln_pre = LayerNorm(width) - - self.transformer = Transformer(width, layers, heads) - self.ln_post = LayerNorm(width) - self.logit_scale = torch.nn.Parameter(torch.ones([]) * 4.6052) - - self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) - - def forward(self, imgs: torch.Tensor, c: torch.Tensor = None): - """ - imgs : Batch of images - c : category indices. - """ - - x = self.conv1(imgs) # shape = [*, width, grid, grid] - # shape = [*, width, grid ** 2] - x = x.reshape(x.shape[0], x.shape[1], -1) - x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] - - # [CLS, grid] + maybe Categories. - tokens = [self.class_embedding.tile(x.shape[0], 1, 1), x] # NLD - pos_embed = [self.positional_embedding] # LD - - if self.n_categories and c is not None: # If c is None, we don't add the token - tokens += [self.c_embedding(c).unsqueeze(1)] # ND -> N1D - pos_embed += [self.c_pos_embedding] # 1D - - x = torch.cat( - tokens, - dim=1, - ) # shape = [*, grid ** 2 + 1|2, width] = N(L|L+1)D - pos_embed = torch.cat(pos_embed, dim=0).unsqueeze(0) # 1(L|L+1)D - - x = x + pos_embed - x = self.ln_pre(x) - - x = x.permute(1, 0, 2) # NLD -> LND - - x = self.transformer(x) - x = x.permute(1, 0, 2) # LND -> NLD - - x = self.ln_post(x[:, 0, :]) - - x = x @ self.proj - - return x - - -# SIZES -B32_Params = { - "input_resolution": 224, - "patch_size": 32, - "width": 768, - "layers": 12, - "heads": 12, - "output_dim": 512, -} - -B16_Params = { - "input_resolution": 224, - "patch_size": 16, - "width": 768, - "layers": 12, - "heads": 12, - "output_dim": 512, -} - -params = {"B32": B32_Params, "B16": B16_Params} \ No newline at end of file diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/metrics/fad.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/metrics/fad.py deleted file mode 100644 index de66138dbb14fd4246bbfe590bddfd5beaf1ed8c..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/metrics/fad.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -from pathlib import Path -import os -import subprocess -import tempfile -import typing as tp - -from audiocraft.data.audio import audio_write -from audiocraft.data.audio_utils import convert_audio -import flashy -import torch -import torchmetrics - -from ..environment import AudioCraftEnvironment - - -logger = logging.getLogger(__name__) - -VGGISH_SAMPLE_RATE = 16_000 -VGGISH_CHANNELS = 1 - - -class FrechetAudioDistanceMetric(torchmetrics.Metric): - """Fréchet Audio Distance computation based on official TensorFlow implementation from Google Research. - - From: D.C. Dowson & B.V. Landau The Fréchet distance between - multivariate normal distributions - https://doi.org/10.1016/0047-259X(82)90077-X - The Fréchet distance between two multivariate gaussians, - `X ~ N(mu_x, sigma_x)` and `Y ~ N(mu_y, sigma_y)`, is `d^2`. - d^2 = (mu_x - mu_y)^2 + Tr(sigma_x + sigma_y - 2 * sqrt(sigma_x*sigma_y)) - = (mu_x - mu_y)^2 + Tr(sigma_x) + Tr(sigma_y) - - 2 * Tr(sqrt(sigma_x*sigma_y))) - - To use this FAD computation metric, you need to have the proper Frechet Audio Distance tool setup - from: https://github.com/google-research/google-research/tree/master/frechet_audio_distance - We provide the below instructions as reference but we do not guarantee for further support - in frechet_audio_distance installation. This was tested with python 3.10, cuda 11.8, tensorflow 2.12.0. - - We recommend installing the frechet_audio_distance library in a dedicated env (e.g. conda). - - 1. Get the code and models following the repository instructions. We used the steps below: - git clone git@github.com:google-research/google-research.git - git clone git@github.com:tensorflow/models.git - mkdir google-research/tensorflow_models - touch google-research/tensorflow_models/__init__.py - cp -r models/research/audioset google-research/tensorflow_models/ - touch google-research/tensorflow_models/audioset/__init__.py - echo "from .vggish import mel_features, vggish_params, vggish_slim" > \ - google-research/tensorflow_models/audioset/__init__.py - # we can now remove the tensorflow models repository - # rm -r models - cd google-research - Follow the instructions to download the vggish checkpoint. AudioCraft base configuration - assumes it is placed in the AudioCraft reference dir. - - Note that we operate the following changes for the code to work with TensorFlow 2.X and python 3: - - Update xrange for range in: - https://github.com/google-research/google-research/blob/master/frechet_audio_distance/audioset_model.py - - Update `tf_record = tf.python_io.tf_record_iterator(filename).next()` to - `tf_record = tf.python_io.tf_record_iterator(filename).__next__()` in - https://github.com/google-research/google-research/blob/master/frechet_audio_distance/fad_utils.py - - Update `import vggish_params as params` to `from . import vggish_params as params` in: - https://github.com/tensorflow/models/blob/master/research/audioset/vggish/vggish_slim.py - - Add flag to provide a given batch size for running the AudioSet model in: - https://github.com/google-research/google-research/blob/master/frechet_audio_distance/create_embeddings_main.py - ``` - flags.DEFINE_integer('batch_size', 64, - 'Number of samples in the batch for AudioSet model.') - ``` - Ensure you pass the flag to the create_embeddings_beam.create_pipeline function, adding: - `batch_size=FLAGS.batch_size` to the provided parameters. - - 2. Follow instructions for the library installation and a valid TensorFlow installation - ``` - # e.g. 
instructions from: https://www.tensorflow.org/install/pip - conda install -c conda-forge cudatoolkit=11.8.0 - python3 -m pip install nvidia-cudnn-cu11==8.6.0.163 tensorflow==2.12.* - mkdir -p $CONDA_PREFIX/etc/conda/activate.d - echo 'CUDNN_PATH=$(dirname $(python -c "import nvidia.cudnn;print(nvidia.cudnn.__file__)"))' \ - >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/:$CUDNN_PATH/lib' \ - >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - source $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - # Verify install: on a machine with GPU device - python3 -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))" - ``` - - Now install frechet_audio_distance required dependencies: - ``` - # We assume we already have TensorFlow installed from the above steps - pip install apache-beam numpy scipy tf_slim - ``` - - Finally, follow remaining library instructions to ensure you have a working frechet_audio_distance setup - (you may want to specify --model_ckpt flag pointing to the model's path). - - 3. AudioCraft's FrechetAudioDistanceMetric requires 2 environment variables pointing to the python executable - and Tensorflow library path from the above installation steps: - export TF_PYTHON_EXE="" - export TF_LIBRARY_PATH="" - - e.g. assuming we have installed everything in a dedicated conda env - with python 3.10 that is currently active: - export TF_PYTHON_EXE="$CONDA_PREFIX/bin/python" - export TF_LIBRARY_PATH="$CONDA_PREFIX/lib/python3.10/site-packages/nvidia/cudnn/lib" - - Finally you may want to export the following variable: - export TF_FORCE_GPU_ALLOW_GROWTH=true - See: https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth - - You can save those environment variables in your training conda env, when currently active: - `$CONDA_PREFIX/etc/conda/activate.d/env_vars.sh` - e.g. assuming the env with TensorFlow and frechet_audio_distance install is named ac_eval, - and the training conda env is named audiocraft: - ``` - # activate training env - conda activate audiocraft - # get path to all envs - CONDA_ENV_DIR=$(dirname $CONDA_PREFIX) - # export pointers to evaluation env for using TensorFlow in FrechetAudioDistanceMetric - touch $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - echo 'export TF_PYTHON_EXE="$CONDA_ENV_DIR/ac_eval/bin/python"' >> \ - $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - echo 'export TF_LIBRARY_PATH="$CONDA_ENV_DIR/ac_eval/lib/python3.10/site-packages/nvidia/cudnn/lib"' >> \ - $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - # optionally: - echo 'export TF_FORCE_GPU_ALLOW_GROWTH=true' >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh - # you may need to reactivate the audiocraft env for this to take effect - ``` - - Args: - bin (Path or str): Path to installed frechet audio distance code. - model_path (Path or str): Path to Tensorflow checkpoint for the model - used to compute statistics over the embedding beams. - format (str): Audio format used to save files. - log_folder (Path or str, optional): Path where to write process logs. 
- """ - def __init__(self, bin: tp.Union[Path, str], model_path: tp.Union[Path, str], - format: str = "wav", batch_size: tp.Optional[int] = None, - log_folder: tp.Optional[tp.Union[Path, str]] = None): - super().__init__() - self.model_sample_rate = VGGISH_SAMPLE_RATE - self.model_channels = VGGISH_CHANNELS - self.model_path = AudioCraftEnvironment.resolve_reference_path(model_path) - assert Path(self.model_path).exists(), f"Could not find provided model checkpoint path at: {self.model_path}" - self.format = format - self.batch_size = batch_size - self.bin = bin - self.tf_env = {"PYTHONPATH": str(self.bin)} - self.python_path = os.environ.get('TF_PYTHON_EXE') or 'python' - logger.info("Python exe for TF is %s", self.python_path) - if 'TF_LIBRARY_PATH' in os.environ: - self.tf_env['LD_LIBRARY_PATH'] = os.environ['TF_LIBRARY_PATH'] - if 'TF_FORCE_GPU_ALLOW_GROWTH' in os.environ: - self.tf_env['TF_FORCE_GPU_ALLOW_GROWTH'] = os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] - logger.info("Env for TF is %r", self.tf_env) - self.reset(log_folder) - self.add_state("total_files", default=torch.tensor(0.), dist_reduce_fx="sum") - - def reset(self, log_folder: tp.Optional[tp.Union[Path, str]] = None): - """Reset torchmetrics.Metrics state.""" - log_folder = Path(log_folder or tempfile.mkdtemp()) - self.tmp_dir = log_folder / 'fad' - self.tmp_dir.mkdir(exist_ok=True) - self.samples_tests_dir = self.tmp_dir / 'tests' - self.samples_tests_dir.mkdir(exist_ok=True) - self.samples_background_dir = self.tmp_dir / 'background' - self.samples_background_dir.mkdir(exist_ok=True) - self.manifest_tests = self.tmp_dir / 'files_tests.cvs' - self.manifest_background = self.tmp_dir / 'files_background.cvs' - self.stats_tests_dir = self.tmp_dir / 'stats_tests' - self.stats_background_dir = self.tmp_dir / 'stats_background' - self.counter = 0 - - def update(self, preds: torch.Tensor, targets: torch.Tensor, - sizes: torch.Tensor, sample_rates: torch.Tensor, - stems: tp.Optional[tp.List[str]] = None): - """Update torchmetrics.Metrics by saving the audio and updating the manifest file.""" - assert preds.shape == targets.shape, f"preds={preds.shape} != targets={targets.shape}" - num_samples = preds.shape[0] - assert num_samples == sizes.size(0) and num_samples == sample_rates.size(0) - assert stems is None or num_samples == len(set(stems)) - for i in range(num_samples): - self.total_files += 1 # type: ignore - self.counter += 1 - wav_len = int(sizes[i].item()) - sample_rate = int(sample_rates[i].item()) - pred_wav = preds[i] - target_wav = targets[i] - pred_wav = pred_wav[..., :wav_len] - target_wav = target_wav[..., :wav_len] - stem_name = stems[i] if stems is not None else f'sample_{self.counter}_{flashy.distrib.rank()}' - # dump audio files - try: - pred_wav = convert_audio( - pred_wav.unsqueeze(0), from_rate=sample_rate, - to_rate=self.model_sample_rate, to_channels=1).squeeze(0) - audio_write( - self.samples_tests_dir / stem_name, pred_wav, sample_rate=self.model_sample_rate, - format=self.format, strategy="peak") - except Exception as e: - logger.error(f"Exception occured when saving tests files for FAD computation: {repr(e)} - {e}") - try: - # for the ground truth audio, we enforce the 'peak' strategy to avoid modifying - # the original audio when writing it - target_wav = convert_audio( - target_wav.unsqueeze(0), from_rate=sample_rate, - to_rate=self.model_sample_rate, to_channels=1).squeeze(0) - audio_write( - self.samples_background_dir / stem_name, target_wav, sample_rate=self.model_sample_rate, - format=self.format, 
strategy="peak") - except Exception as e: - logger.error(f"Exception occured when saving background files for FAD computation: {repr(e)} - {e}") - - def _get_samples_name(self, is_background: bool): - return 'background' if is_background else 'tests' - - def _create_embedding_beams(self, is_background: bool, gpu_index: tp.Optional[int] = None): - if is_background: - input_samples_dir = self.samples_background_dir - input_filename = self.manifest_background - stats_name = self.stats_background_dir - else: - input_samples_dir = self.samples_tests_dir - input_filename = self.manifest_tests - stats_name = self.stats_tests_dir - beams_name = self._get_samples_name(is_background) - log_file = self.tmp_dir / f'fad_logs_create_beams_{beams_name}.log' - - logger.info(f"Scanning samples folder to fetch list of files: {input_samples_dir}") - with open(input_filename, "w") as fout: - for path in Path(input_samples_dir).glob(f"*.{self.format}"): - fout.write(f"{str(path)}\n") - - cmd = [ - self.python_path, "-m", - "frechet_audio_distance.create_embeddings_main", - "--model_ckpt", f"{self.model_path}", - "--input_files", f"{str(input_filename)}", - "--stats", f"{str(stats_name)}", - ] - if self.batch_size is not None: - cmd += ["--batch_size", str(self.batch_size)] - logger.info(f"Launching frechet_audio_distance embeddings main method: {' '.join(cmd)} on {beams_name}") - env = os.environ - if gpu_index is not None: - env["CUDA_VISIBLE_DEVICES"] = str(gpu_index) - process = subprocess.Popen( - cmd, stdout=open(log_file, "w"), env={**env, **self.tf_env}, stderr=subprocess.STDOUT) - return process, log_file - - def _compute_fad_score(self, gpu_index: tp.Optional[int] = None): - cmd = [ - self.python_path, "-m", "frechet_audio_distance.compute_fad", - "--test_stats", f"{str(self.stats_tests_dir)}", - "--background_stats", f"{str(self.stats_background_dir)}", - ] - logger.info(f"Launching frechet_audio_distance compute fad method: {' '.join(cmd)}") - env = os.environ - if gpu_index is not None: - env["CUDA_VISIBLE_DEVICES"] = str(gpu_index) - result = subprocess.run(cmd, env={**env, **self.tf_env}, capture_output=True) - if result.returncode: - logger.error( - "Error with FAD computation from stats: \n %s \n %s", - result.stdout.decode(), result.stderr.decode() - ) - raise RuntimeError("Error while executing FAD computation from stats") - try: - # result is "FAD: (d+).(d+)" hence we remove the prefix with (d+) being one digit or more - fad_score = float(result.stdout[4:]) - return fad_score - except Exception as e: - raise RuntimeError(f"Error parsing FAD score from command stdout: {e}") - - def _log_process_result(self, returncode: int, log_file: tp.Union[Path, str], is_background: bool) -> None: - beams_name = self._get_samples_name(is_background) - if returncode: - with open(log_file, "r") as f: - error_log = f.read() - logger.error(error_log) - os._exit(1) - else: - logger.info(f"Successfully computed embedding beams on {beams_name} samples.") - - def _parallel_create_embedding_beams(self, num_of_gpus: int): - assert num_of_gpus > 0 - logger.info("Creating embeddings beams in a parallel manner on different GPUs") - tests_beams_process, tests_beams_log_file = self._create_embedding_beams(is_background=False, gpu_index=0) - bg_beams_process, bg_beams_log_file = self._create_embedding_beams(is_background=True, gpu_index=1) - tests_beams_code = tests_beams_process.wait() - bg_beams_code = bg_beams_process.wait() - self._log_process_result(tests_beams_code, tests_beams_log_file, is_background=False) - 
self._log_process_result(bg_beams_code, bg_beams_log_file, is_background=True) - - def _sequential_create_embedding_beams(self): - logger.info("Creating embeddings beams in a sequential manner") - tests_beams_process, tests_beams_log_file = self._create_embedding_beams(is_background=False) - tests_beams_code = tests_beams_process.wait() - self._log_process_result(tests_beams_code, tests_beams_log_file, is_background=False) - bg_beams_process, bg_beams_log_file = self._create_embedding_beams(is_background=True) - bg_beams_code = bg_beams_process.wait() - self._log_process_result(bg_beams_code, bg_beams_log_file, is_background=True) - - @flashy.distrib.rank_zero_only - def _local_compute_frechet_audio_distance(self): - """Compute Frechet Audio Distance score calling TensorFlow API.""" - num_of_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0 - if num_of_gpus > 1: - self._parallel_create_embedding_beams(num_of_gpus) - else: - self._sequential_create_embedding_beams() - fad_score = self._compute_fad_score(gpu_index=0) - return fad_score - - def compute(self) -> float: - """Compute metrics.""" - assert self.total_files.item() > 0, "No files dumped for FAD computation!" # type: ignore - fad_score = self._local_compute_frechet_audio_distance() - logger.warning(f"FAD score = {fad_score}") - fad_score = flashy.distrib.broadcast_object(fad_score, src=0) - return fad_score diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/__init__.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/__init__.py deleted file mode 100644 index 378a0068432a371af364de9d73785901c0f83383..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/utils/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# flake8: noqa -# Copyright (c) OpenMMLab. All rights reserved. 
-from .config import Config, ConfigDict, DictAction -from .misc import (check_prerequisites, concat_list, deprecated_api_warning, - has_method, import_modules_from_strings, is_list_of, - is_method_overridden, is_seq_of, is_str, is_tuple_of, - iter_cast, list_cast, requires_executable, requires_package, - slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple, - to_ntuple, tuple_cast) -from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist, - scandir, symlink) -from .progressbar import (ProgressBar, track_iter_progress, - track_parallel_progress, track_progress) -from .testing import (assert_attrs_equal, assert_dict_contains_subset, - assert_dict_has_keys, assert_is_norm_layer, - assert_keys_equal, assert_params_all_zeros, - check_python_script) -from .timer import Timer, TimerError, check_time -from .version_utils import digit_version, get_git_hash - -try: - import torch -except ImportError: - __all__ = [ - 'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast', - 'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of', - 'slice_list', 'concat_list', 'check_prerequisites', 'requires_package', - 'requires_executable', 'is_filepath', 'fopen', 'check_file_exist', - 'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar', - 'track_progress', 'track_iter_progress', 'track_parallel_progress', - 'Timer', 'TimerError', 'check_time', 'deprecated_api_warning', - 'digit_version', 'get_git_hash', 'import_modules_from_strings', - 'assert_dict_contains_subset', 'assert_attrs_equal', - 'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script', - 'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple', - 'is_method_overridden', 'has_method' - ] -else: - from .env import collect_env - from .logging import get_logger, print_log - from .parrots_jit import jit, skip_no_elena - from .parrots_wrapper import ( - TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader, - PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, - _AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm, - _MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home) - from .registry import Registry, build_from_cfg - from .trace import is_jit_tracing - __all__ = [ - 'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger', - 'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast', - 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list', - 'check_prerequisites', 'requires_package', 'requires_executable', - 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', - 'symlink', 'scandir', 'ProgressBar', 'track_progress', - 'track_iter_progress', 'track_parallel_progress', 'Registry', - 'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm', - '_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm', - '_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd', - 'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension', - 'DataLoader', 'PoolDataLoader', 'TORCH_VERSION', - 'deprecated_api_warning', 'digit_version', 'get_git_hash', - 'import_modules_from_strings', 'jit', 'skip_no_elena', - 'assert_dict_contains_subset', 'assert_attrs_equal', - 'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer', - 'assert_params_all_zeros', 'check_python_script', - 'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch', - '_get_cuda_home', 'has_method' - ] diff --git a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/layers/attractor.py 
b/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/layers/attractor.py deleted file mode 100644 index 2a8efe645adea1d88a12e2ac5cc6bb2a251eef9d..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/layers/attractor.py +++ /dev/null @@ -1,208 +0,0 @@ -# MIT License - -# Copyright (c) 2022 Intelligent Systems Lab Org - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -# File author: Shariq Farooq Bhat - -import torch -import torch.nn as nn - - -@torch.jit.script -def exp_attractor(dx, alpha: float = 300, gamma: int = 2): - """Exponential attractor: dc = exp(-alpha*|dx|^gamma) * dx , where dx = a - c, a = attractor point, c = bin center, dc = shift in bin centermmary for exp_attractor - - Args: - dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center. - alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300. - gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2. - - Returns: - torch.Tensor : Delta shifts - dc; New bin centers = Old bin centers + dc - """ - return torch.exp(-alpha*(torch.abs(dx)**gamma)) * (dx) - - -@torch.jit.script -def inv_attractor(dx, alpha: float = 300, gamma: int = 2): - """Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center - This is the default one according to the accompanying paper. - - Args: - dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center. - alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300. - gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2. - - Returns: - torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc - """ - return dx.div(1+alpha*dx.pow(gamma)) - - -class AttractorLayer(nn.Module): - def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10, - alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False): - """ - Attractor layer for bin centers. 
Bin centers are bounded on the interval (min_depth, max_depth) - """ - super().__init__() - - self.n_attractors = n_attractors - self.n_bins = n_bins - self.min_depth = min_depth - self.max_depth = max_depth - self.alpha = alpha - self.gamma = gamma - self.kind = kind - self.attractor_type = attractor_type - self.memory_efficient = memory_efficient - - self._net = nn.Sequential( - nn.Conv2d(in_features, mlp_dim, 1, 1, 0), - nn.ReLU(inplace=True), - nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm - nn.ReLU(inplace=True) - ) - - def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): - """ - Args: - x (torch.Tensor) : feature block; shape - n, c, h, w - b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w - - Returns: - tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w - """ - if prev_b_embedding is not None: - if interpolate: - prev_b_embedding = nn.functional.interpolate( - prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) - x = x + prev_b_embedding - - A = self._net(x) - eps = 1e-3 - A = A + eps - n, c, h, w = A.shape - A = A.view(n, self.n_attractors, 2, h, w) - A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w - A_normed = A[:, :, 0, ...] # n, na, h, w - - b_prev = nn.functional.interpolate( - b_prev, (h, w), mode='bilinear', align_corners=True) - b_centers = b_prev - - if self.attractor_type == 'exp': - dist = exp_attractor - else: - dist = inv_attractor - - if not self.memory_efficient: - func = {'mean': torch.mean, 'sum': torch.sum}[self.kind] - # .shape N, nbins, h, w - delta_c = func(dist(A_normed.unsqueeze( - 2) - b_centers.unsqueeze(1)), dim=1) - else: - delta_c = torch.zeros_like(b_centers, device=b_centers.device) - for i in range(self.n_attractors): - # .shape N, nbins, h, w - delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers) - - if self.kind == 'mean': - delta_c = delta_c / self.n_attractors - - b_new_centers = b_centers + delta_c - B_centers = (self.max_depth - self.min_depth) * \ - b_new_centers + self.min_depth - B_centers, _ = torch.sort(B_centers, dim=1) - B_centers = torch.clip(B_centers, self.min_depth, self.max_depth) - return b_new_centers, B_centers - - -class AttractorLayerUnnormed(nn.Module): - def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10, - alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False): - """ - Attractor layer for bin centers. Bin centers are unbounded - """ - super().__init__() - - self.n_attractors = n_attractors - self.n_bins = n_bins - self.min_depth = min_depth - self.max_depth = max_depth - self.alpha = alpha - self.gamma = gamma - self.kind = kind - self.attractor_type = attractor_type - self.memory_efficient = memory_efficient - - self._net = nn.Sequential( - nn.Conv2d(in_features, mlp_dim, 1, 1, 0), - nn.ReLU(inplace=True), - nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0), - nn.Softplus() - ) - - def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): - """ - Args: - x (torch.Tensor) : feature block; shape - n, c, h, w - b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w - - Returns: - tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. 
Two outputs just to keep the API consistent with the normed version - """ - if prev_b_embedding is not None: - if interpolate: - prev_b_embedding = nn.functional.interpolate( - prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) - x = x + prev_b_embedding - - A = self._net(x) - n, c, h, w = A.shape - - b_prev = nn.functional.interpolate( - b_prev, (h, w), mode='bilinear', align_corners=True) - b_centers = b_prev - - if self.attractor_type == 'exp': - dist = exp_attractor - else: - dist = inv_attractor - - if not self.memory_efficient: - func = {'mean': torch.mean, 'sum': torch.sum}[self.kind] - # .shape N, nbins, h, w - delta_c = func( - dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1) - else: - delta_c = torch.zeros_like(b_centers, device=b_centers.device) - for i in range(self.n_attractors): - delta_c += dist(A[:, i, ...].unsqueeze(1) - - b_centers) # .shape N, nbins, h, w - - if self.kind == 'mean': - delta_c = delta_c / self.n_attractors - - b_new_centers = b_centers + delta_c - B_centers = b_new_centers - - return b_new_centers, B_centers diff --git a/spaces/Surfrider/surfnet/tracking/gps.py b/spaces/Surfrider/surfnet/tracking/gps.py deleted file mode 100644 index ea63bde6689a8ea75b2cc409d4f5b16285de0893..0000000000000000000000000000000000000000 --- a/spaces/Surfrider/surfnet/tracking/gps.py +++ /dev/null @@ -1,268 +0,0 @@ -import json -import datetime -import pandas as pd -import geopandas -from datetime import timedelta - -def parse_json(file_obj)->dict: - """Parse a JSON file produced by Plastic Origin Mobile App - - Args: - file_obj (str): a file_obj from gradio input File type - - Returns: - dict: the json data as a dictionnary - """ - with open(file_obj.name) as json_file: - json_data = json.load(json_file) - - return json_data - - -def get_json_gps_list(json_data:dict)->list: - """Get a list of GPS point from a json_data object - - Args: - json_data (dict): the gps data as a json dict - - Returns: - list: a list of GPS point - """ - point_list = [] - for point in json_data['positions']: - time = datetime.datetime.strptime(point['date'][:19].replace("T"," "),'%Y-%m-%d %H:%M:%S') - point_info = {'Time': time, 'Latitude': point['lat'], - 'Longitude': point['lng'], 'Elevation': 0} - point_list.append(point_info) - return point_list - - -def create_time(time:datetime)->datetime: - """Create time by adding 1 second to time input - - Arguments: - time {datetime} -- a time value - - Returns: - new_time -- the new time created by adding 1 second - """ - new_time = time - new_time = new_time + timedelta(seconds=1) - return new_time - - -def create_latitude(lat1:float, lat2:float)->float: - """Create latitude as the average of lat1 and lat2 - - Arguments: - lat1 {float} -- a first latitude value - lat2 {float} -- a second latitute value - - Returns: - new_latitude -- the average latitude - """ - new_latitude = (lat1+lat2)/2 - new_latitude = round(new_latitude, 6) - return new_latitude - - -def create_longitude(long1:float, long2:float)->float: - """Create longitude as the average of long1 and long2 - - Arguments: - long1 {float} -- a first longitude value - long2 {float} -- a second longitude value - - Returns: - new_longitude -- the average longitude - """ - new_longitude = (long1+long2)/2 - new_longitude = round(new_longitude, 6) - return new_longitude - - -def create_elevation(elev1:float, elev2:float)->float: - - new_elevation = (elev1+elev2)/2 - new_elevation = round(new_elevation, 6) - return new_elevation - - -def fill_gps(input_gps_list:list, 
video_length:float)->list: - """Fill an input gps list when there are missing value with regard to time(second) - - Arguments: - input_gps_list {list} -- a list of gps point - video_length {float} -- the length of related video from which gps point are taken from - - Returns: - filled_gps -- the list of gps point filled with regard to time - """ - filled_gps = input_gps_list.copy() - gps_length = len(filled_gps) - iteration_length = int( - (filled_gps[gps_length-1]['Time'] - filled_gps[0]['Time']).total_seconds()) - # this section output a filled gps list of length iteration_length+1 = Delta T between last gps timestamp and first one - i = 0 - while i < (iteration_length): - delta = filled_gps[i+1]['Time']-filled_gps[i]['Time'] - delta = int(delta.total_seconds()) - if delta > 1: # adding a newly created element at index i+1 - missing_time = create_time(filled_gps[i]['Time']) - missing_latitude = create_latitude( - filled_gps[i]['Latitude'], filled_gps[i+1]['Latitude']) - missing_longitude = create_longitude( - filled_gps[i]['Longitude'], filled_gps[i+1]['Longitude']) - missing_elevation = create_elevation( - filled_gps[i]['Elevation'], filled_gps[i+1]['Elevation']) - new_gps = {'Time': missing_time, 'Latitude': missing_latitude, - 'Longitude': missing_longitude, 'Elevation': missing_elevation} - filled_gps.insert(i+1, new_gps) - i = i+1 - # this section add missing point at the end of the list, in case filled_gps initial Delta time length is less than actual video length - if len(filled_gps) < video_length: - j = 0 - while len(filled_gps) < video_length: - filled_gps.insert(len(filled_gps), filled_gps[len(filled_gps)-1]) - j = j+1 - - return filled_gps - - -def map_label_to_trash_id_PG(label:str)->str: - """Map label of a trash to equivalent ID within PostGre server - - Arguments: - label {str} -- the label of the trash - - Returns: - id_PG -- the equivalent id within PG Trash table of trash label - """ - switcher = { - 'Fragment':0, #'Sheet / tarp / plastic bag / fragment', - 'Insulating':1, #'Insulating material', - 'Bottle':2, #'Bottle-shaped', - 'Can':3, #'Can-shaped', - 'Drum':4, - 'Packaging':5, #'Other packaging', - 'Tire':6, - 'Fishing net':7, #'Fishing net / cord', - 'Easily namable':8, - 'Unclear':9 - } - - id_PG = switcher.get(label, "0") - return id_PG - - -def get_trash_label(frame_to_box:dict)->str: - """Get label from a frame_to_box dictionnary from an AI prediction - - Arguments: - frame_to_box {dict} -- the data for a unique trash from the AI prediction - - Returns: - frame_to_box['label'] -- the label value predicted by the AI for a trash - """ - return frame_to_box['label'] - - -def get_trash_first_time(trash:dict)->int: - """Get the time index for a trash, the first time it is identified - - Arguments: - trash {dict} -- [description] - - Returns: - fist_index -- the index when the trash is identified for the first time - """ - frame_to_box = trash['frame_to_box'] - first_index = int(list(frame_to_box.keys())[0]) - return first_index - - -def get_trash_time_index(prediction:dict,media_fps:float)->int: - """ Get trash time stamp - - Arguments: - prediction {dict} -- the prediction made by AI of a unique trash - media_fps {float} -- the FPS of the media where the trash comes from - - Returns: - time_index -- the timestamp of the trash with regard to video it comes from - """ - first_index = get_trash_first_time(prediction) - time_index = int(first_index / media_fps) - return time_index - - -def get_clean_timed_prediction(prediction:dict,media_fps:int)->dict: - """Get 
timed prediction with single frame_to_box - - Arguments: - prediction {dict} -- a single prediction from a dictionary of AI predictions - media_fps {float} -- the FPS of the media where the trash comes from - - Returns: - timed_prediction -- a prediction with the first frame_to_box only & a time_index additional key/value pair - """ - first_index = str(get_trash_first_time(prediction)) - clean_frame_to_box = prediction['frame_to_box'][first_index] - time_index = get_trash_time_index(prediction,media_fps) - trash_type_id = int(map_label_to_trash_id_PG(prediction['label'])) - timed_prediction = {'time_index':int(time_index),'frame_to_box':clean_frame_to_box,'id':prediction['id'],'label':prediction['label'],'trash_type_id':trash_type_id} - return timed_prediction - - -def get_df_prediction(json_prediction:dict,media_fps)->pd.DataFrame: - """Get AI prediction dictionnary as Dataframe - - Arguments: - json_prediction {dict} -- a full prediction of AI service as JSON dico - media_fps {float} -- the FPS of the media where the trash comes from - - Returns: - df_prediction -- the AI prediction as a Dataframe - """ - timed_prediction_list = [] - for prediction in json_prediction['detected_trash']: - timed_prediction_list.append(get_clean_timed_prediction(prediction,media_fps)) - df_prediction = pd.DataFrame(timed_prediction_list) - - return df_prediction - - -def get_trash_gps_df(predictions_df:pd.DataFrame,gps_points_filled:list)->pd.DataFrame: - """Get a dataframe with Trash & GPS data alltogether - - Args: - predictions_df (pd.DataFrame): AI predictions from Surfnet as a Dataframe - gps_points_filled (list): GPS points filled list from mobile GPS tracking - - Returns: - data_df (pd.DataFrame): a dataframe with Trash & GPS data - """ - trash_gps_list = [] - #time_indexes = predictions_df['time_index'] - # Twist to display trashes on different seconds and avoid from overlaping on map - time_indexes= range(0,len(predictions_df['time_index'])) - for time_index in time_indexes: - trash_gps = gps_points_filled[time_index] - trash_gps_list.append(trash_gps) - trash_gps_df = pd.DataFrame(trash_gps_list) - data_df = pd.concat([predictions_df,trash_gps_df],axis=1) - return data_df - - -def get_trash_gps_geo_df(trash_gps_df:pd.DataFrame)->pd.DataFrame: - """Get a geo dataframe from a Trash & GPS dataframe - - Args: - trash_gps_df (pd.DataFrame): a dataframe with Trash & GPS data from get_trash_gps_df - - Returns: - trash_gps_gdf (pd.DataFrame): a geo dataframe with added geometry columns - """ - trash_gps_gdf = geopandas.GeoDataFrame( - trash_gps_df, geometry=geopandas.points_from_xy(trash_gps_df.Longitude, trash_gps_df.Latitude)) - return trash_gps_gdf diff --git a/spaces/TNR-5/Music-discord-bot/README.md b/spaces/TNR-5/Music-discord-bot/README.md deleted file mode 100644 index 292b07ca5abd1fd2e0e9185cff8852be5d9a9a59..0000000000000000000000000000000000000000 --- a/spaces/TNR-5/Music-discord-bot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Music Discord Bot -emoji: 🏢 -colorFrom: gray -colorTo: blue -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -duplicated_from: katasou/Music-discord-bot ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py deleted file mode 100644 index 
27c69f0d1eaf3e223d599e91f969d52a821426fe..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/operations/build/metadata_editable.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Metadata generation logic for source distributions. -""" - -import os - -from pip._vendor.pyproject_hooks import BuildBackendHookCaller - -from pip._internal.build_env import BuildEnvironment -from pip._internal.exceptions import ( - InstallationSubprocessError, - MetadataGenerationFailed, -) -from pip._internal.utils.subprocess import runner_with_spinner_message -from pip._internal.utils.temp_dir import TempDirectory - - -def generate_editable_metadata( - build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str -) -> str: - """Generate metadata using mechanisms described in PEP 660. - - Returns the generated metadata directory. - """ - metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True) - - metadata_dir = metadata_tmpdir.path - - with build_env: - # Note that BuildBackendHookCaller implements a fallback for - # prepare_metadata_for_build_wheel/editable, so we don't have to - # consider the possibility that this hook doesn't exist. - runner = runner_with_spinner_message( - "Preparing editable metadata (pyproject.toml)" - ) - with backend.subprocess_runner(runner): - try: - distinfo_dir = backend.prepare_metadata_for_build_editable(metadata_dir) - except InstallationSubprocessError as error: - raise MetadataGenerationFailed(package_details=details) from error - - return os.path.join(metadata_dir, distinfo_dir) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py deleted file mode 100644 index 0331297b85b89c3387c3868d6254f420ed6a0381..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py +++ /dev/null @@ -1,730 +0,0 @@ -import contextlib -import functools -import logging -from typing import ( - TYPE_CHECKING, - Dict, - FrozenSet, - Iterable, - Iterator, - List, - Mapping, - NamedTuple, - Optional, - Sequence, - Set, - Tuple, - TypeVar, - cast, -) - -from pip._vendor.packaging.requirements import InvalidRequirement -from pip._vendor.packaging.specifiers import SpecifierSet -from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.resolvelib import ResolutionImpossible - -from pip._internal.cache import CacheEntry, WheelCache -from pip._internal.exceptions import ( - DistributionNotFound, - InstallationError, - MetadataInconsistent, - UnsupportedPythonVersion, - UnsupportedWheel, -) -from pip._internal.index.package_finder import PackageFinder -from pip._internal.metadata import BaseDistribution, get_default_environment -from pip._internal.models.link import Link -from pip._internal.models.wheel import Wheel -from pip._internal.operations.prepare import RequirementPreparer -from pip._internal.req.constructors import install_req_from_link_and_ireq -from pip._internal.req.req_install import ( - InstallRequirement, - check_invalid_constraint_type, -) -from pip._internal.resolution.base import InstallRequirementProvider -from pip._internal.utils.compatibility_tags import get_supported -from pip._internal.utils.hashes import Hashes -from pip._internal.utils.packaging import 
get_requirement -from pip._internal.utils.virtualenv import running_under_virtualenv - -from .base import Candidate, CandidateVersion, Constraint, Requirement -from .candidates import ( - AlreadyInstalledCandidate, - BaseCandidate, - EditableCandidate, - ExtrasCandidate, - LinkCandidate, - RequiresPythonCandidate, - as_base_candidate, -) -from .found_candidates import FoundCandidates, IndexCandidateInfo -from .requirements import ( - ExplicitRequirement, - RequiresPythonRequirement, - SpecifierRequirement, - UnsatisfiableRequirement, -) - -if TYPE_CHECKING: - from typing import Protocol - - class ConflictCause(Protocol): - requirement: RequiresPythonRequirement - parent: Candidate - - -logger = logging.getLogger(__name__) - -C = TypeVar("C") -Cache = Dict[Link, C] - - -class CollectedRootRequirements(NamedTuple): - requirements: List[Requirement] - constraints: Dict[str, Constraint] - user_requested: Dict[str, int] - - -class Factory: - def __init__( - self, - finder: PackageFinder, - preparer: RequirementPreparer, - make_install_req: InstallRequirementProvider, - wheel_cache: Optional[WheelCache], - use_user_site: bool, - force_reinstall: bool, - ignore_installed: bool, - ignore_requires_python: bool, - py_version_info: Optional[Tuple[int, ...]] = None, - ) -> None: - self._finder = finder - self.preparer = preparer - self._wheel_cache = wheel_cache - self._python_candidate = RequiresPythonCandidate(py_version_info) - self._make_install_req_from_spec = make_install_req - self._use_user_site = use_user_site - self._force_reinstall = force_reinstall - self._ignore_requires_python = ignore_requires_python - - self._build_failures: Cache[InstallationError] = {} - self._link_candidate_cache: Cache[LinkCandidate] = {} - self._editable_candidate_cache: Cache[EditableCandidate] = {} - self._installed_candidate_cache: Dict[str, AlreadyInstalledCandidate] = {} - self._extras_candidate_cache: Dict[ - Tuple[int, FrozenSet[str]], ExtrasCandidate - ] = {} - - if not ignore_installed: - env = get_default_environment() - self._installed_dists = { - dist.canonical_name: dist - for dist in env.iter_installed_distributions(local_only=False) - } - else: - self._installed_dists = {} - - @property - def force_reinstall(self) -> bool: - return self._force_reinstall - - def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None: - if not link.is_wheel: - return - wheel = Wheel(link.filename) - if wheel.supported(self._finder.target_python.get_tags()): - return - msg = f"{link.filename} is not a supported wheel on this platform." 
- raise UnsupportedWheel(msg) - - def _make_extras_candidate( - self, base: BaseCandidate, extras: FrozenSet[str] - ) -> ExtrasCandidate: - cache_key = (id(base), extras) - try: - candidate = self._extras_candidate_cache[cache_key] - except KeyError: - candidate = ExtrasCandidate(base, extras) - self._extras_candidate_cache[cache_key] = candidate - return candidate - - def _make_candidate_from_dist( - self, - dist: BaseDistribution, - extras: FrozenSet[str], - template: InstallRequirement, - ) -> Candidate: - try: - base = self._installed_candidate_cache[dist.canonical_name] - except KeyError: - base = AlreadyInstalledCandidate(dist, template, factory=self) - self._installed_candidate_cache[dist.canonical_name] = base - if not extras: - return base - return self._make_extras_candidate(base, extras) - - def _make_candidate_from_link( - self, - link: Link, - extras: FrozenSet[str], - template: InstallRequirement, - name: Optional[NormalizedName], - version: Optional[CandidateVersion], - ) -> Optional[Candidate]: - # TODO: Check already installed candidate, and use it if the link and - # editable flag match. - - if link in self._build_failures: - # We already tried this candidate before, and it does not build. - # Don't bother trying again. - return None - - if template.editable: - if link not in self._editable_candidate_cache: - try: - self._editable_candidate_cache[link] = EditableCandidate( - link, - template, - factory=self, - name=name, - version=version, - ) - except MetadataInconsistent as e: - logger.info( - "Discarding [blue underline]%s[/]: [yellow]%s[reset]", - link, - e, - extra={"markup": True}, - ) - self._build_failures[link] = e - return None - - base: BaseCandidate = self._editable_candidate_cache[link] - else: - if link not in self._link_candidate_cache: - try: - self._link_candidate_cache[link] = LinkCandidate( - link, - template, - factory=self, - name=name, - version=version, - ) - except MetadataInconsistent as e: - logger.info( - "Discarding [blue underline]%s[/]: [yellow]%s[reset]", - link, - e, - extra={"markup": True}, - ) - self._build_failures[link] = e - return None - base = self._link_candidate_cache[link] - - if not extras: - return base - return self._make_extras_candidate(base, extras) - - def _iter_found_candidates( - self, - ireqs: Sequence[InstallRequirement], - specifier: SpecifierSet, - hashes: Hashes, - prefers_installed: bool, - incompatible_ids: Set[int], - ) -> Iterable[Candidate]: - if not ireqs: - return () - - # The InstallRequirement implementation requires us to give it a - # "template". Here we just choose the first requirement to represent - # all of them. - # Hopefully the Project model can correct this mismatch in the future. - template = ireqs[0] - assert template.req, "Candidates found on index must be PEP 508" - name = canonicalize_name(template.req.name) - - extras: FrozenSet[str] = frozenset() - for ireq in ireqs: - assert ireq.req, "Candidates found on index must be PEP 508" - specifier &= ireq.req.specifier - hashes &= ireq.hashes(trust_internet=False) - extras |= frozenset(ireq.extras) - - def _get_installed_candidate() -> Optional[Candidate]: - """Get the candidate for the currently-installed version.""" - # If --force-reinstall is set, we want the version from the index - # instead, so we "pretend" there is nothing installed. 
- if self._force_reinstall: - return None - try: - installed_dist = self._installed_dists[name] - except KeyError: - return None - # Don't use the installed distribution if its version does not fit - # the current dependency graph. - if not specifier.contains(installed_dist.version, prereleases=True): - return None - candidate = self._make_candidate_from_dist( - dist=installed_dist, - extras=extras, - template=template, - ) - # The candidate is a known incompatibility. Don't use it. - if id(candidate) in incompatible_ids: - return None - return candidate - - def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]: - result = self._finder.find_best_candidate( - project_name=name, - specifier=specifier, - hashes=hashes, - ) - icans = list(result.iter_applicable()) - - # PEP 592: Yanked releases are ignored unless the specifier - # explicitly pins a version (via '==' or '===') that can be - # solely satisfied by a yanked release. - all_yanked = all(ican.link.is_yanked for ican in icans) - - def is_pinned(specifier: SpecifierSet) -> bool: - for sp in specifier: - if sp.operator == "===": - return True - if sp.operator != "==": - continue - if sp.version.endswith(".*"): - continue - return True - return False - - pinned = is_pinned(specifier) - - # PackageFinder returns earlier versions first, so we reverse. - for ican in reversed(icans): - if not (all_yanked and pinned) and ican.link.is_yanked: - continue - func = functools.partial( - self._make_candidate_from_link, - link=ican.link, - extras=extras, - template=template, - name=name, - version=ican.version, - ) - yield ican.version, func - - return FoundCandidates( - iter_index_candidate_infos, - _get_installed_candidate(), - prefers_installed, - incompatible_ids, - ) - - def _iter_explicit_candidates_from_base( - self, - base_requirements: Iterable[Requirement], - extras: FrozenSet[str], - ) -> Iterator[Candidate]: - """Produce explicit candidates from the base given an extra-ed package. - - :param base_requirements: Requirements known to the resolver. The - requirements are guaranteed to not have extras. - :param extras: The extras to inject into the explicit requirements' - candidates. - """ - for req in base_requirements: - lookup_cand, _ = req.get_candidate_lookup() - if lookup_cand is None: # Not explicit. - continue - # We've stripped extras from the identifier, and should always - # get a BaseCandidate here, unless there's a bug elsewhere. - base_cand = as_base_candidate(lookup_cand) - assert base_cand is not None, "no extras here" - yield self._make_extras_candidate(base_cand, extras) - - def _iter_candidates_from_constraints( - self, - identifier: str, - constraint: Constraint, - template: InstallRequirement, - ) -> Iterator[Candidate]: - """Produce explicit candidates from constraints. - - This creates "fake" InstallRequirement objects that are basically clones - of what "should" be the template, but with original_link set to link. 
- """ - for link in constraint.links: - self._fail_if_link_is_unsupported_wheel(link) - candidate = self._make_candidate_from_link( - link, - extras=frozenset(), - template=install_req_from_link_and_ireq(link, template), - name=canonicalize_name(identifier), - version=None, - ) - if candidate: - yield candidate - - def find_candidates( - self, - identifier: str, - requirements: Mapping[str, Iterable[Requirement]], - incompatibilities: Mapping[str, Iterator[Candidate]], - constraint: Constraint, - prefers_installed: bool, - ) -> Iterable[Candidate]: - # Collect basic lookup information from the requirements. - explicit_candidates: Set[Candidate] = set() - ireqs: List[InstallRequirement] = [] - for req in requirements[identifier]: - cand, ireq = req.get_candidate_lookup() - if cand is not None: - explicit_candidates.add(cand) - if ireq is not None: - ireqs.append(ireq) - - # If the current identifier contains extras, add explicit candidates - # from entries from extra-less identifier. - with contextlib.suppress(InvalidRequirement): - parsed_requirement = get_requirement(identifier) - explicit_candidates.update( - self._iter_explicit_candidates_from_base( - requirements.get(parsed_requirement.name, ()), - frozenset(parsed_requirement.extras), - ), - ) - - # Add explicit candidates from constraints. We only do this if there are - # known ireqs, which represent requirements not already explicit. If - # there are no ireqs, we're constraining already-explicit requirements, - # which is handled later when we return the explicit candidates. - if ireqs: - try: - explicit_candidates.update( - self._iter_candidates_from_constraints( - identifier, - constraint, - template=ireqs[0], - ), - ) - except UnsupportedWheel: - # If we're constrained to install a wheel incompatible with the - # target architecture, no candidates will ever be valid. - return () - - # Since we cache all the candidates, incompatibility identification - # can be made quicker by comparing only the id() values. - incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())} - - # If none of the requirements want an explicit candidate, we can ask - # the finder for candidates. - if not explicit_candidates: - return self._iter_found_candidates( - ireqs, - constraint.specifier, - constraint.hashes, - prefers_installed, - incompat_ids, - ) - - return ( - c - for c in explicit_candidates - if id(c) not in incompat_ids - and constraint.is_satisfied_by(c) - and all(req.is_satisfied_by(c) for req in requirements[identifier]) - ) - - def _make_requirement_from_install_req( - self, ireq: InstallRequirement, requested_extras: Iterable[str] - ) -> Optional[Requirement]: - if not ireq.match_markers(requested_extras): - logger.info( - "Ignoring %s: markers '%s' don't match your environment", - ireq.name, - ireq.markers, - ) - return None - if not ireq.link: - return SpecifierRequirement(ireq) - self._fail_if_link_is_unsupported_wheel(ireq.link) - cand = self._make_candidate_from_link( - ireq.link, - extras=frozenset(ireq.extras), - template=ireq, - name=canonicalize_name(ireq.name) if ireq.name else None, - version=None, - ) - if cand is None: - # There's no way we can satisfy a URL requirement if the underlying - # candidate fails to build. An unnamed URL must be user-supplied, so - # we fail eagerly. If the URL is named, an unsatisfiable requirement - # can make the resolver do the right thing, either backtrack (and - # maybe find some other requirement that's buildable) or raise a - # ResolutionImpossible eventually. 
- if not ireq.name: - raise self._build_failures[ireq.link] - return UnsatisfiableRequirement(canonicalize_name(ireq.name)) - return self.make_requirement_from_candidate(cand) - - def collect_root_requirements( - self, root_ireqs: List[InstallRequirement] - ) -> CollectedRootRequirements: - collected = CollectedRootRequirements([], {}, {}) - for i, ireq in enumerate(root_ireqs): - if ireq.constraint: - # Ensure we only accept valid constraints - problem = check_invalid_constraint_type(ireq) - if problem: - raise InstallationError(problem) - if not ireq.match_markers(): - continue - assert ireq.name, "Constraint must be named" - name = canonicalize_name(ireq.name) - if name in collected.constraints: - collected.constraints[name] &= ireq - else: - collected.constraints[name] = Constraint.from_ireq(ireq) - else: - req = self._make_requirement_from_install_req( - ireq, - requested_extras=(), - ) - if req is None: - continue - if ireq.user_supplied and req.name not in collected.user_requested: - collected.user_requested[req.name] = i - collected.requirements.append(req) - return collected - - def make_requirement_from_candidate( - self, candidate: Candidate - ) -> ExplicitRequirement: - return ExplicitRequirement(candidate) - - def make_requirement_from_spec( - self, - specifier: str, - comes_from: Optional[InstallRequirement], - requested_extras: Iterable[str] = (), - ) -> Optional[Requirement]: - ireq = self._make_install_req_from_spec(specifier, comes_from) - return self._make_requirement_from_install_req(ireq, requested_extras) - - def make_requires_python_requirement( - self, - specifier: SpecifierSet, - ) -> Optional[Requirement]: - if self._ignore_requires_python: - return None - # Don't bother creating a dependency for an empty Requires-Python. - if not str(specifier): - return None - return RequiresPythonRequirement(specifier, self._python_candidate) - - def get_wheel_cache_entry( - self, link: Link, name: Optional[str] - ) -> Optional[CacheEntry]: - """Look up the link in the wheel cache. - - If ``preparer.require_hashes`` is True, don't use the wheel cache, - because cached wheels, always built locally, have different hashes - than the files downloaded from the index server and thus throw false - hash mismatches. Furthermore, cached wheels at present have - nondeterministic contents due to file modification times. - """ - if self._wheel_cache is None: - return None - return self._wheel_cache.get_cache_entry( - link=link, - package_name=name, - supported_tags=get_supported(), - ) - - def get_dist_to_uninstall(self, candidate: Candidate) -> Optional[BaseDistribution]: - # TODO: Are there more cases this needs to return True? Editable? - dist = self._installed_dists.get(candidate.project_name) - if dist is None: # Not installed, no uninstallation required. - return None - - # We're installing into global site. The current installation must - # be uninstalled, no matter it's in global or user site, because the - # user site installation has precedence over global. - if not self._use_user_site: - return dist - - # We're installing into user site. Remove the user site installation. - if dist.in_usersite: - return dist - - # We're installing into user site, but the installed incompatible - # package is in global site. We can't uninstall that, and would let - # the new user installation to "shadow" it. But shadowing won't work - # in virtual environments, so we error out. 
- if running_under_virtualenv() and dist.in_site_packages: - message = ( - f"Will not install to the user site because it will lack " - f"sys.path precedence to {dist.raw_name} in {dist.location}" - ) - raise InstallationError(message) - return None - - def _report_requires_python_error( - self, causes: Sequence["ConflictCause"] - ) -> UnsupportedPythonVersion: - assert causes, "Requires-Python error reported with no cause" - - version = self._python_candidate.version - - if len(causes) == 1: - specifier = str(causes[0].requirement.specifier) - message = ( - f"Package {causes[0].parent.name!r} requires a different " - f"Python: {version} not in {specifier!r}" - ) - return UnsupportedPythonVersion(message) - - message = f"Packages require a different Python. {version} not in:" - for cause in causes: - package = cause.parent.format_for_error() - specifier = str(cause.requirement.specifier) - message += f"\n{specifier!r} (required by {package})" - return UnsupportedPythonVersion(message) - - def _report_single_requirement_conflict( - self, req: Requirement, parent: Optional[Candidate] - ) -> DistributionNotFound: - if parent is None: - req_disp = str(req) - else: - req_disp = f"{req} (from {parent.name})" - - cands = self._finder.find_all_candidates(req.project_name) - skipped_by_requires_python = self._finder.requires_python_skipped_reasons() - versions = [str(v) for v in sorted({c.version for c in cands})] - - if skipped_by_requires_python: - logger.critical( - "Ignored the following versions that require a different python " - "version: %s", - "; ".join(skipped_by_requires_python) or "none", - ) - logger.critical( - "Could not find a version that satisfies the requirement %s " - "(from versions: %s)", - req_disp, - ", ".join(versions) or "none", - ) - if str(req) == "requirements.txt": - logger.info( - "HINT: You are attempting to install a package literally " - 'named "requirements.txt" (which cannot exist). Consider ' - "using the '-r' flag to install the packages listed in " - "requirements.txt" - ) - - return DistributionNotFound(f"No matching distribution found for {req}") - - def get_installation_error( - self, - e: "ResolutionImpossible[Requirement, Candidate]", - constraints: Dict[str, Constraint], - ) -> InstallationError: - assert e.causes, "Installation error reported with no cause" - - # If one of the things we can't solve is "we need Python X.Y", - # that is what we report. - requires_python_causes = [ - cause - for cause in e.causes - if isinstance(cause.requirement, RequiresPythonRequirement) - and not cause.requirement.is_satisfied_by(self._python_candidate) - ] - if requires_python_causes: - # The comprehension above makes sure all Requirement instances are - # RequiresPythonRequirement, so let's cast for convenience. - return self._report_requires_python_error( - cast("Sequence[ConflictCause]", requires_python_causes), - ) - - # Otherwise, we have a set of causes which can't all be satisfied - # at once. - - # The simplest case is when we have *one* cause that can't be - # satisfied. We just report that case. - if len(e.causes) == 1: - req, parent = e.causes[0] - if req.name not in constraints: - return self._report_single_requirement_conflict(req, parent) - - # OK, we now have a list of requirements that can't all be - # satisfied at once. 
- - # A couple of formatting helpers - def text_join(parts: List[str]) -> str: - if len(parts) == 1: - return parts[0] - - return ", ".join(parts[:-1]) + " and " + parts[-1] - - def describe_trigger(parent: Candidate) -> str: - ireq = parent.get_install_requirement() - if not ireq or not ireq.comes_from: - return f"{parent.name}=={parent.version}" - if isinstance(ireq.comes_from, InstallRequirement): - return str(ireq.comes_from.name) - return str(ireq.comes_from) - - triggers = set() - for req, parent in e.causes: - if parent is None: - # This is a root requirement, so we can report it directly - trigger = req.format_for_error() - else: - trigger = describe_trigger(parent) - triggers.add(trigger) - - if triggers: - info = text_join(sorted(triggers)) - else: - info = "the requested packages" - - msg = ( - "Cannot install {} because these package versions " - "have conflicting dependencies.".format(info) - ) - logger.critical(msg) - msg = "\nThe conflict is caused by:" - - relevant_constraints = set() - for req, parent in e.causes: - if req.name in constraints: - relevant_constraints.add(req.name) - msg = msg + "\n " - if parent: - msg = msg + f"{parent.name} {parent.version} depends on " - else: - msg = msg + "The user requested " - msg = msg + req.format_for_error() - for key in relevant_constraints: - spec = constraints[key].specifier - msg += f"\n The user requested (constraint) {key}{spec}" - - msg = ( - msg - + "\n\n" - + "To fix this you could try to:\n" - + "1. loosen the range of package versions you've specified\n" - + "2. remove package versions to allow pip attempt to solve " - + "the dependency conflict\n" - ) - - logger.info(msg) - - return DistributionNotFound( - "ResolutionImpossible: for help visit " - "https://pip.pypa.io/en/latest/topics/dependency-resolution/" - "#dealing-with-dependency-conflicts" - ) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py deleted file mode 100644 index edc19627dba6835339768ccbaf726db21d8ac212..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/style.py +++ /dev/null @@ -1,197 +0,0 @@ -""" - pygments.style - ~~~~~~~~~~~~~~ - - Basic style object. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pip._vendor.pygments.token import Token, STANDARD_TYPES - -# Default mapping of ansixxx to RGB colors. 
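# (The values below are bare RRGGBB hex strings without a leading '#';
#  colorformat() in StyleMeta normalizes user-supplied colors to the same form.)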
-_ansimap = { - # dark - 'ansiblack': '000000', - 'ansired': '7f0000', - 'ansigreen': '007f00', - 'ansiyellow': '7f7fe0', - 'ansiblue': '00007f', - 'ansimagenta': '7f007f', - 'ansicyan': '007f7f', - 'ansigray': 'e5e5e5', - # normal - 'ansibrightblack': '555555', - 'ansibrightred': 'ff0000', - 'ansibrightgreen': '00ff00', - 'ansibrightyellow': 'ffff00', - 'ansibrightblue': '0000ff', - 'ansibrightmagenta': 'ff00ff', - 'ansibrightcyan': '00ffff', - 'ansiwhite': 'ffffff', -} -# mapping of deprecated #ansixxx colors to new color names -_deprecated_ansicolors = { - # dark - '#ansiblack': 'ansiblack', - '#ansidarkred': 'ansired', - '#ansidarkgreen': 'ansigreen', - '#ansibrown': 'ansiyellow', - '#ansidarkblue': 'ansiblue', - '#ansipurple': 'ansimagenta', - '#ansiteal': 'ansicyan', - '#ansilightgray': 'ansigray', - # normal - '#ansidarkgray': 'ansibrightblack', - '#ansired': 'ansibrightred', - '#ansigreen': 'ansibrightgreen', - '#ansiyellow': 'ansibrightyellow', - '#ansiblue': 'ansibrightblue', - '#ansifuchsia': 'ansibrightmagenta', - '#ansiturquoise': 'ansibrightcyan', - '#ansiwhite': 'ansiwhite', -} -ansicolors = set(_ansimap) - - -class StyleMeta(type): - - def __new__(mcs, name, bases, dct): - obj = type.__new__(mcs, name, bases, dct) - for token in STANDARD_TYPES: - if token not in obj.styles: - obj.styles[token] = '' - - def colorformat(text): - if text in ansicolors: - return text - if text[0:1] == '#': - col = text[1:] - if len(col) == 6: - return col - elif len(col) == 3: - return col[0] * 2 + col[1] * 2 + col[2] * 2 - elif text == '': - return '' - elif text.startswith('var') or text.startswith('calc'): - return text - assert False, "wrong color format %r" % text - - _styles = obj._styles = {} - - for ttype in obj.styles: - for token in ttype.split(): - if token in _styles: - continue - ndef = _styles.get(token.parent, None) - styledefs = obj.styles.get(token, '').split() - if not ndef or token is None: - ndef = ['', 0, 0, 0, '', '', 0, 0, 0] - elif 'noinherit' in styledefs and token is not Token: - ndef = _styles[Token][:] - else: - ndef = ndef[:] - _styles[token] = ndef - for styledef in obj.styles.get(token, '').split(): - if styledef == 'noinherit': - pass - elif styledef == 'bold': - ndef[1] = 1 - elif styledef == 'nobold': - ndef[1] = 0 - elif styledef == 'italic': - ndef[2] = 1 - elif styledef == 'noitalic': - ndef[2] = 0 - elif styledef == 'underline': - ndef[3] = 1 - elif styledef == 'nounderline': - ndef[3] = 0 - elif styledef[:3] == 'bg:': - ndef[4] = colorformat(styledef[3:]) - elif styledef[:7] == 'border:': - ndef[5] = colorformat(styledef[7:]) - elif styledef == 'roman': - ndef[6] = 1 - elif styledef == 'sans': - ndef[7] = 1 - elif styledef == 'mono': - ndef[8] = 1 - else: - ndef[0] = colorformat(styledef) - - return obj - - def style_for_token(cls, token): - t = cls._styles[token] - ansicolor = bgansicolor = None - color = t[0] - if color in _deprecated_ansicolors: - color = _deprecated_ansicolors[color] - if color in ansicolors: - ansicolor = color - color = _ansimap[color] - bgcolor = t[4] - if bgcolor in _deprecated_ansicolors: - bgcolor = _deprecated_ansicolors[bgcolor] - if bgcolor in ansicolors: - bgansicolor = bgcolor - bgcolor = _ansimap[bgcolor] - - return { - 'color': color or None, - 'bold': bool(t[1]), - 'italic': bool(t[2]), - 'underline': bool(t[3]), - 'bgcolor': bgcolor or None, - 'border': t[5] or None, - 'roman': bool(t[6]) or None, - 'sans': bool(t[7]) or None, - 'mono': bool(t[8]) or None, - 'ansicolor': ansicolor, - 'bgansicolor': bgansicolor, - } - - 
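# Illustration (standard public Pygments usage, outside this vendored copy):
# subclassing Style triggers the parsing in StyleMeta.__new__ above, and
# style_for_token returns the expanded per-token dict.
from pygments.style import Style
from pygments.token import Comment, Keyword

class ExampleStyle(Style):
    styles = {
        Comment: "italic #888",  # 3-digit hex expands to 888888
        Keyword: "bold #005",
    }

# ExampleStyle.style_for_token(Keyword) ->
# {'color': '000055', 'bold': True, 'italic': False, 'underline': False, ...}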
def list_styles(cls): - return list(cls) - - def styles_token(cls, ttype): - return ttype in cls._styles - - def __iter__(cls): - for token in cls._styles: - yield token, cls.style_for_token(token) - - def __len__(cls): - return len(cls._styles) - - -class Style(metaclass=StyleMeta): - - #: overall background color (``None`` means transparent) - background_color = '#ffffff' - - #: highlight background color - highlight_color = '#ffffcc' - - #: line number font color - line_number_color = 'inherit' - - #: line number background color - line_number_background_color = 'transparent' - - #: special line number font color - line_number_special_color = '#000000' - - #: special line number background color - line_number_special_background_color = '#ffffc0' - - #: Style definitions for individual token types. - styles = {} - - # Attribute for lexers defined within Pygments. If set - # to True, the style is not shown in the style gallery - # on the website. This is intended for language-specific - # styles. - web_style_gallery_exclude = False diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco.py deleted file mode 100644 index ed4f7ccb20efa3b54c719783e279c381ca5d8587..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco.py +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import contextlib -import datetime -import io -import json -import logging -import numpy as np -import os -import shutil -import pycocotools.mask as mask_util -from fvcore.common.timer import Timer -from iopath.common.file_io import file_lock -from PIL import Image - -from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes -from detectron2.utils.file_io import PathManager - -from .. import DatasetCatalog, MetadataCatalog - -""" -This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". -""" - - -logger = logging.getLogger(__name__) - -__all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"] - - -def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None): - """ - Load a json file with COCO's instances annotation format. - Currently supports instance detection, instance segmentation, - and person keypoints annotations. - - Args: - json_file (str): full path to the json file in COCO instances annotation format. - image_root (str or path-like): the directory where the images in this json file exists. - dataset_name (str or None): the name of the dataset (e.g., coco_2017_train). - When provided, this function will also do the following: - - * Put "thing_classes" into the metadata associated with this dataset. - * Map the category ids into a contiguous range (needed by standard dataset format), - and add "thing_dataset_id_to_contiguous_id" to the metadata associated - with this dataset. - - This option should usually be provided, unless users need to load - the original json content and apply more processing manually. - extra_annotation_keys (list[str]): list of per-annotation keys that should also be - loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints", - "category_id", "segmentation"). The values for these keys will be returned as-is. - For example, the densepose annotations are loaded in this way. 
- - Returns: - list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See - `Using Custom Datasets `_ ) when `dataset_name` is not None. - If `dataset_name` is None, the returned `category_ids` may be - incontiguous and may not conform to the Detectron2 standard format. - - Notes: - 1. This function does not read the image files. - The results do not have the "image" field. - """ - from pycocotools.coco import COCO - - timer = Timer() - json_file = PathManager.get_local_path(json_file) - with contextlib.redirect_stdout(io.StringIO()): - coco_api = COCO(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) - - id_map = None - if dataset_name is not None: - meta = MetadataCatalog.get(dataset_name) - cat_ids = sorted(coco_api.getCatIds()) - cats = coco_api.loadCats(cat_ids) - # The categories in a custom json file may not be sorted. - thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] - meta.thing_classes = thing_classes - - # In COCO, certain category ids are artificially removed, - # and by convention they are always ignored. - # We deal with COCO's id issue and translate - # the category ids to contiguous ids in [0, 80). - - # It works by looking at the "categories" field in the json, therefore - # if users' own json also have incontiguous ids, we'll - # apply this mapping as well but print a warning. - if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): - if "coco" not in dataset_name: - logger.warning( - """ -Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. -""" - ) - id_map = {v: i for i, v in enumerate(cat_ids)} - meta.thing_dataset_id_to_contiguous_id = id_map - - # sort indices for reproducible results - img_ids = sorted(coco_api.imgs.keys()) - # imgs is a list of dicts, each looks something like: - # {'license': 4, - # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', - # 'file_name': 'COCO_val2014_000000001268.jpg', - # 'height': 427, - # 'width': 640, - # 'date_captured': '2013-11-17 05:57:24', - # 'id': 1268} - imgs = coco_api.loadImgs(img_ids) - # anns is a list[list[dict]], where each dict is an annotation - # record for an object. The inner list enumerates the objects in an image - # and the outer list enumerates over images. Example of anns[0]: - # [{'segmentation': [[192.81, - # 247.09, - # ... - # 219.03, - # 249.06]], - # 'area': 1035.749, - # 'iscrowd': 0, - # 'image_id': 1268, - # 'bbox': [192.81, 224.8, 74.73, 33.43], - # 'category_id': 16, - # 'id': 42986}, - # ...] - anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] - total_num_valid_anns = sum([len(x) for x in anns]) - total_num_anns = len(coco_api.anns) - if total_num_valid_anns < total_num_anns: - logger.warning( - f"{json_file} contains {total_num_anns} annotations, but only " - f"{total_num_valid_anns} of them match to images in the file." - ) - - if "minival" not in json_file: - # The popular valminusminival & minival annotations for COCO2014 contain this bug. - # However the ratio of buggy annotations there is tiny and does not affect accuracy. - # Therefore we explicitly white-list them. 
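            # (The check below collects every annotation id across all images
            # and asserts they are globally unique; duplicate ids are exactly
            # the bug in the valminusminival/minival files described above.)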
- ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( - json_file - ) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) - - dataset_dicts = [] - - ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) - - num_instances_without_valid_segmentation = 0 - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - record["file_name"] = os.path.join(image_root, img_dict["file_name"]) - record["height"] = img_dict["height"] - record["width"] = img_dict["width"] - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - # Check that the image_id in this annotation is the same as - # the image_id we're looking at. - # This fails only when the data parsing logic or the annotation file is buggy. - - # The original COCO valminusminival2014 & minival2014 annotation files - # actually contains bugs that, together with certain ways of using COCO API, - # can trigger this assertion. - assert anno["image_id"] == image_id - - assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' - - obj = {key: anno[key] for key in ann_keys if key in anno} - if "bbox" in obj and len(obj["bbox"]) == 0: - raise ValueError( - f"One annotation of image {image_id} contains empty 'bbox' value! " - "This json does not have valid COCO format." - ) - - segm = anno.get("segmentation", None) - if segm: # either list[list[float]] or dict(RLE) - if isinstance(segm, dict): - if isinstance(segm["counts"], list): - # convert to compressed RLE - segm = mask_util.frPyObjects(segm, *segm["size"]) - else: - # filter out invalid polygons (< 3 points) - segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] - if len(segm) == 0: - num_instances_without_valid_segmentation += 1 - continue # ignore this instance - obj["segmentation"] = segm - - keypts = anno.get("keypoints", None) - if keypts: # list[int] - for idx, v in enumerate(keypts): - if idx % 3 != 2: - # COCO's segmentation coordinates are floating points in [0, H or W], - # but keypoint coordinates are integers in [0, H-1 or W-1] - # Therefore we assume the coordinates are "pixel indices" and - # add 0.5 to convert to floating point coordinates. - keypts[idx] = v + 0.5 - obj["keypoints"] = keypts - - obj["bbox_mode"] = BoxMode.XYWH_ABS - if id_map: - annotation_category_id = obj["category_id"] - try: - obj["category_id"] = id_map[annotation_category_id] - except KeyError as e: - raise KeyError( - f"Encountered category_id={annotation_category_id} " - "but this id does not exist in 'categories' of the json file." - ) from e - objs.append(obj) - record["annotations"] = objs - dataset_dicts.append(record) - - if num_instances_without_valid_segmentation > 0: - logger.warning( - "Filtered out {} instances without valid segmentation. ".format( - num_instances_without_valid_segmentation - ) - + "There might be issues in your dataset generation process. Please " - "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" - ) - return dataset_dicts - - -def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): - """ - Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are - treated as ground truth annotations and all files under "image_root" with "image_ext" extension - as input images. 
Ground truth and input images are matched using file paths relative to - "gt_root" and "image_root" respectively without taking into account file extensions. - This works for COCO as well as some other datasets. - - Args: - gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation - annotations are stored as images with integer values in pixels that represent - corresponding semantic labels. - image_root (str): the directory where the input images are. - gt_ext (str): file extension for ground truth annotations. - image_ext (str): file extension for input images. - - Returns: - list[dict]: - a list of dicts in detectron2 standard format without instance-level - annotation. - - Notes: - 1. This function does not read the image and ground truth files. - The results do not have the "image" and "sem_seg" fields. - """ - - # We match input images with ground truth based on their relative filepaths (without file - # extensions) starting from 'image_root' and 'gt_root' respectively. - def file2id(folder_path, file_path): - # extract relative path starting from `folder_path` - image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) - # remove file extension - image_id = os.path.splitext(image_id)[0] - return image_id - - input_files = sorted( - (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), - key=lambda file_path: file2id(image_root, file_path), - ) - gt_files = sorted( - (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), - key=lambda file_path: file2id(gt_root, file_path), - ) - - assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) - - # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images - if len(input_files) != len(gt_files): - logger.warn( - "Directory {} and {} has {} and {} files, respectively.".format( - image_root, gt_root, len(input_files), len(gt_files) - ) - ) - input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] - gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] - intersect = list(set(input_basenames) & set(gt_basenames)) - # sort, otherwise each worker may obtain a list[dict] in different order - intersect = sorted(intersect) - logger.warn("Will use their intersection of {} files.".format(len(intersect))) - input_files = [os.path.join(image_root, f + image_ext) for f in intersect] - gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] - - logger.info( - "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) - ) - - dataset_dicts = [] - for (img_path, gt_path) in zip(input_files, gt_files): - record = {} - record["file_name"] = img_path - record["sem_seg_file_name"] = gt_path - dataset_dicts.append(record) - - return dataset_dicts - - -def convert_to_coco_dict(dataset_name): - """ - Convert an instance detection/segmentation or keypoint detection dataset - in detectron2's standard format into COCO json format. - - Generic dataset description can be found here: - https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset - - COCO data format description can be found here: - http://cocodataset.org/#format-data - - Args: - dataset_name (str): - name of the source dataset - Must be registered in DatastCatalog and in detectron2's standard format. 
- Must have corresponding metadata "thing_classes" - Returns: - coco_dict: serializable dict in COCO json format - """ - - dataset_dicts = DatasetCatalog.get(dataset_name) - metadata = MetadataCatalog.get(dataset_name) - - # unmap the category mapping ids for COCO - if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): - reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} - reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa - else: - reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa - - categories = [ - {"id": reverse_id_mapper(id), "name": name} - for id, name in enumerate(metadata.thing_classes) - ] - - logger.info("Converting dataset dicts into COCO format") - coco_images = [] - coco_annotations = [] - - for image_id, image_dict in enumerate(dataset_dicts): - coco_image = { - "id": image_dict.get("image_id", image_id), - "width": int(image_dict["width"]), - "height": int(image_dict["height"]), - "file_name": str(image_dict["file_name"]), - } - coco_images.append(coco_image) - - anns_per_image = image_dict.get("annotations", []) - for annotation in anns_per_image: - # create a new dict with only COCO fields - coco_annotation = {} - - # COCO requirement: XYWH box format for axis-align and XYWHA for rotated - bbox = annotation["bbox"] - if isinstance(bbox, np.ndarray): - if bbox.ndim != 1: - raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") - bbox = bbox.tolist() - if len(bbox) not in [4, 5]: - raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.") - from_bbox_mode = annotation["bbox_mode"] - to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS - bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) - - # COCO requirement: instance area - if "segmentation" in annotation: - # Computing areas for instances by counting the pixels - segmentation = annotation["segmentation"] - # TODO: check segmentation type: RLE, BinaryMask or Polygon - if isinstance(segmentation, list): - polygons = PolygonMasks([segmentation]) - area = polygons.area()[0].item() - elif isinstance(segmentation, dict): # RLE - area = mask_util.area(segmentation).item() - else: - raise TypeError(f"Unknown segmentation type {type(segmentation)}!") - else: - # Computing areas using bounding boxes - if to_bbox_mode == BoxMode.XYWH_ABS: - bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) - area = Boxes([bbox_xy]).area()[0].item() - else: - area = RotatedBoxes([bbox]).area()[0].item() - - if "keypoints" in annotation: - keypoints = annotation["keypoints"] # list[int] - for idx, v in enumerate(keypoints): - if idx % 3 != 2: - # COCO's segmentation coordinates are floating points in [0, H or W], - # but keypoint coordinates are integers in [0, H-1 or W-1] - # For COCO format consistency we substract 0.5 - # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163 - keypoints[idx] = v - 0.5 - if "num_keypoints" in annotation: - num_keypoints = annotation["num_keypoints"] - else: - num_keypoints = sum(kp > 0 for kp in keypoints[2::3]) - - # COCO requirement: - # linking annotations to images - # "id" field must start with 1 - coco_annotation["id"] = len(coco_annotations) + 1 - coco_annotation["image_id"] = coco_image["id"] - coco_annotation["bbox"] = [round(float(x), 3) for x in bbox] - coco_annotation["area"] = float(area) - coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0)) - coco_annotation["category_id"] = 
int(reverse_id_mapper(annotation["category_id"])) - - # Add optional fields - if "keypoints" in annotation: - coco_annotation["keypoints"] = keypoints - coco_annotation["num_keypoints"] = num_keypoints - - if "segmentation" in annotation: - seg = coco_annotation["segmentation"] = annotation["segmentation"] - if isinstance(seg, dict): # RLE - counts = seg["counts"] - if not isinstance(counts, str): - # make it json-serializable - seg["counts"] = counts.decode("ascii") - - coco_annotations.append(coco_annotation) - - logger.info( - "Conversion finished, " - f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}" - ) - - info = { - "date_created": str(datetime.datetime.now()), - "description": "Automatically generated COCO json file for Detectron2.", - } - coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None} - if len(coco_annotations) > 0: - coco_dict["annotations"] = coco_annotations - return coco_dict - - -def convert_to_coco_json(dataset_name, output_file, allow_cached=True): - """ - Converts dataset into COCO format and saves it to a json file. - dataset_name must be registered in DatasetCatalog and in detectron2's standard format. - - Args: - dataset_name: - reference from the config file to the catalogs - must be registered in DatasetCatalog and in detectron2's standard format - output_file: path of json file that will be saved to - allow_cached: if json file is already present then skip conversion - """ - - # TODO: The dataset or the conversion script *may* change, - # a checksum would be useful for validating the cached data - - PathManager.mkdirs(os.path.dirname(output_file)) - with file_lock(output_file): - if PathManager.exists(output_file) and allow_cached: - logger.warning( - f"Using previously cached COCO format annotations at '{output_file}'. " - "You need to clear the cache file if your dataset has been modified." - ) - else: - logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)") - coco_dict = convert_to_coco_dict(dataset_name) - - logger.info(f"Caching COCO format annotations at '{output_file}' ...") - tmp_file = output_file + ".tmp" - with PathManager.open(tmp_file, "w") as f: - json.dump(coco_dict, f) - shutil.move(tmp_file, output_file) - - -def register_coco_instances(name, metadata, json_file, image_root): - """ - Register a dataset in COCO's json annotation format for - instance detection, instance segmentation and keypoint detection. - (i.e., Type 1 and 2 in http://cocodataset.org/#format-data. - `instances*.json` and `person_keypoints*.json` in the dataset). - - This is an example of how to register a new dataset. - You can do something similar to this function, to register new datasets. - - Args: - name (str): the name that identifies a dataset, e.g. "coco_2014_train". - metadata (dict): extra metadata associated with this dataset. You can - leave it as an empty dict. - json_file (str): path to the json instance annotation file. - image_root (str or path-like): directory which contains all the images. - """ - assert isinstance(name, str), name - assert isinstance(json_file, (str, os.PathLike)), json_file - assert isinstance(image_root, (str, os.PathLike)), image_root - # 1. register a function which returns dicts - DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name)) - - # 2. 
Optionally, add metadata about this dataset, - # since they might be useful in evaluation, visualization or logging - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata - ) - - -if __name__ == "__main__": - """ - Test the COCO json dataset loader. - - Usage: - python -m detectron2.data.datasets.coco \ - path/to/json path/to/image_root dataset_name - - "dataset_name" can be "coco_2014_minival_100", or other - pre-registered ones - """ - from detectron2.utils.logger import setup_logger - from detectron2.utils.visualizer import Visualizer - import detectron2.data.datasets # noqa # add pre-defined metadata - import sys - - logger = setup_logger(name=__name__) - assert sys.argv[3] in DatasetCatalog.list() - meta = MetadataCatalog.get(sys.argv[3]) - - dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3]) - logger.info("Done loading {} samples.".format(len(dicts))) - - dirname = "coco-data-vis" - os.makedirs(dirname, exist_ok=True) - for d in dicts: - img = np.array(Image.open(d["file_name"])) - visualizer = Visualizer(img, metadata=meta) - vis = visualizer.draw_dataset_dict(d) - fpath = os.path.join(dirname, os.path.basename(d["file_name"])) - vis.save(fpath) diff --git a/spaces/Vageesh1/personality_chat/README.md b/spaces/Vageesh1/personality_chat/README.md deleted file mode 100644 index ce8c2e401b8355bb32521b32b65170b35ef2c6c5..0000000000000000000000000000000000000000 --- a/spaces/Vageesh1/personality_chat/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Personality Chat -emoji: 🦀 -colorFrom: purple -colorTo: yellow -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/utils.py b/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/utils.py deleted file mode 100644 index 5bd18f70225e12b2e27fdb4eabcde91d959f8e31..0000000000000000000000000000000000000000 --- a/spaces/Volkopat/SegmentAnythingxGroundingDINO/groundingdino/models/GroundingDINO/utils.py +++ /dev/null @@ -1,268 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import copy -import math - -import torch -import torch.nn.functional as F -from torch import Tensor, nn - - -def _get_clones(module, N, layer_share=False): - # import ipdb; ipdb.set_trace() - if layer_share: - return nn.ModuleList([module for i in range(N)]) - else: - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def get_sine_pos_embed( - pos_tensor: torch.Tensor, - num_pos_feats: int = 128, - temperature: int = 10000, - exchange_xy: bool = True, -): - """generate sine position embedding from a position tensor - Args: - pos_tensor (torch.Tensor): shape: [..., n]. - num_pos_feats (int): projected shape for each float in the tensor. - temperature (int): temperature in the sine/cosine function. - exchange_xy (bool, optional): exchange pos x and pos y. \ - For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True. - Returns: - pos_embed (torch.Tensor): shape: [..., n*num_pos_feats]. 
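    Example (illustrative, checks shapes only):
        >>> pos = torch.rand(2, 8, 2)                        # [..., n] with n = 2
        >>> get_sine_pos_embed(pos, num_pos_feats=128).shape
        torch.Size([2, 8, 256])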
- """ - scale = 2 * math.pi - dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) - dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) - - def sine_func(x: torch.Tensor): - sin_x = x * scale / dim_t - sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) - return sin_x - - pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)] - if exchange_xy: - pos_res[0], pos_res[1] = pos_res[1], pos_res[0] - pos_res = torch.cat(pos_res, dim=-1) - return pos_res - - -def gen_encoder_output_proposals( - memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None -): - """ - Input: - - memory: bs, \sum{hw}, d_model - - memory_padding_mask: bs, \sum{hw} - - spatial_shapes: nlevel, 2 - - learnedwh: 2 - Output: - - output_memory: bs, \sum{hw}, d_model - - output_proposals: bs, \sum{hw}, 4 - """ - N_, S_, C_ = memory.shape - proposals = [] - _cur = 0 - for lvl, (H_, W_) in enumerate(spatial_shapes): - mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1) - valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) - valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) - - # import ipdb; ipdb.set_trace() - - grid_y, grid_x = torch.meshgrid( - torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device), - torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device), - ) - grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2 - - scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2) - grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale - - if learnedwh is not None: - # import ipdb; ipdb.set_trace() - wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl) - else: - wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) - - # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1) - # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale - # wh = torch.ones_like(grid) / scale - proposal = torch.cat((grid, wh), -1).view(N_, -1, 4) - proposals.append(proposal) - _cur += H_ * W_ - # import ipdb; ipdb.set_trace() - output_proposals = torch.cat(proposals, 1) - output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all( - -1, keepdim=True - ) - output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid - output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf")) - output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) - - output_memory = memory - output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0)) - output_memory = output_memory.masked_fill(~output_proposals_valid, float(0)) - - # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf')) - # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf')) - - return output_memory, output_proposals - - -class RandomBoxPerturber: - def __init__( - self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2 - ) -> None: - self.noise_scale = torch.Tensor( - [x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale] - ) - - def __call__(self, refanchors: Tensor) -> Tensor: - nq, bs, query_dim = refanchors.shape - device = refanchors.device - - noise_raw = torch.rand_like(refanchors) - noise_scale = 
self.noise_scale.to(device)[:query_dim] - - new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale) - return new_refanchors.clamp_(0, 1) - - -def sigmoid_focal_loss( - inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False -): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - if no_reduction: - return loss - - return loss.mean(1).sum() / num_boxes - - -class MLP(nn.Module): - """Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x - - -def _get_activation_fn(activation, d_model=256, batch_dim=0): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - if activation == "prelu": - return nn.PReLU() - if activation == "selu": - return F.selu - - raise RuntimeError(f"activation should be relu/gelu, not {activation}.") - - -def gen_sineembed_for_position(pos_tensor): - # n_query, bs, _ = pos_tensor.size() - # sineembed_tensor = torch.zeros(n_query, bs, 256) - scale = 2 * math.pi - dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device) - dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128) - x_embed = pos_tensor[:, :, 0] * scale - y_embed = pos_tensor[:, :, 1] * scale - pos_x = x_embed[:, :, None] / dim_t - pos_y = y_embed[:, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) - pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) - if pos_tensor.size(-1) == 2: - pos = torch.cat((pos_y, pos_x), dim=2) - elif pos_tensor.size(-1) == 4: - w_embed = pos_tensor[:, :, 2] * scale - pos_w = w_embed[:, :, None] / dim_t - pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2) - - h_embed = pos_tensor[:, :, 3] * scale - pos_h = h_embed[:, :, None] / dim_t - pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2) - - pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) - else: - raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1))) - return pos - - -class ContrastiveEmbed(nn.Module): - def __init__(self, max_text_len=256): - """ - Args: - max_text_len: max length of text. 
- """ - super().__init__() - self.max_text_len = max_text_len - - def forward(self, x, text_dict): - """_summary_ - - Args: - x (_type_): _description_ - text_dict (_type_): _description_ - { - 'encoded_text': encoded_text, # bs, 195, d_model - 'text_token_mask': text_token_mask, # bs, 195 - # True for used tokens. False for padding tokens - } - Returns: - _type_: _description_ - """ - assert isinstance(text_dict, dict) - - y = text_dict["encoded_text"] - text_token_mask = text_dict["text_token_mask"] - - res = x @ y.transpose(-1, -2) - res.masked_fill_(~text_token_mask[:, None, :], float("-inf")) - - # padding to max_text_len - new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device) - new_res[..., : res.shape[-1]] = res - - return new_res diff --git "a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" "b/spaces/WhyLIM/ChatGPT-academic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" deleted file mode 100644 index a239d965d7ed512cf3c6c96682c59629d8feaa20..0000000000000000000000000000000000000000 --- "a/spaces/WhyLIM/ChatGPT-academic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" +++ /dev/null @@ -1,149 +0,0 @@ -from predict import predict_no_ui -from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down -fast_debug = False - -def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): - import time, glob, os - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8') as f: - file_content = f.read() - - prefix = "接下来请你逐文件分析下面的工程" if index==0 else "" - i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield chatbot, history, '正常' - - if not fast_debug: - msg = '正常' - - # ** gpt request ** - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield chatbot, history, msg - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield chatbot, history, '正常' - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 - - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield chatbot, history, msg - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield chatbot, history, msg - - - - -@CatchException -def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import time, glob, os - file_manifest = [f for f in glob.glob('*.py')] - for index, fp in enumerate(file_manifest): - # if 'test_project' in fp: continue - with open(fp, 'r', encoding='utf-8') as 
f: - file_content = f.read() - - prefix = "接下来请你分析自己的程序构成,别紧张," if index==0 else "" - i_say = prefix + f'请对下面的程序文件做一个概述文件名是{fp},文件代码是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield chatbot, history, '正常' - - if not fast_debug: - # ** gpt request ** - # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature) - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield chatbot, history, '正常' - time.sleep(2) - - i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{file_manifest})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield chatbot, history, '正常' - - if not fast_debug: - # ** gpt request ** - # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history) - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 - - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield chatbot, history, '正常' - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield chatbot, history, '正常' - -@CatchException -def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") - yield chatbot, history, '正常' - return - yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - - -@CatchException -def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") - yield chatbot, history, '正常' - return - yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - -@CatchException -def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - 
[f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") - yield chatbot, history, '正常' - return - yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - diff --git a/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/utils/utils.py b/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/utils/utils.py deleted file mode 100644 index 86e1448d065fa182ca69aae00d2f2a7eea55d8a4..0000000000000000000000000000000000000000 --- a/spaces/Wrathless/Dkrotzer-MusicalMagic/audiocraft/utils/utils.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from concurrent.futures import ProcessPoolExecutor -from functools import wraps -import hashlib -import logging -import typing as tp - -import flashy -import flashy.distrib -import omegaconf -import torch -from torch.nn.utils.rnn import pad_sequence - - -logger = logging.getLogger(__name__) - - -def dict_from_config(cfg: omegaconf.DictConfig) -> dict: - """Convenience function to map an omegaconf configuration to a dictionary. - - Args: - cfg (omegaconf.DictConfig): Original configuration to map to dict. - Returns: - dict: Config as dictionary object. - """ - dct = omegaconf.OmegaConf.to_container(cfg, resolve=True) - assert isinstance(dct, dict) - return dct - - -def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset: - if max_samples >= len(dataset): - return dataset - - generator = torch.Generator().manual_seed(seed) - perm = torch.randperm(len(dataset), generator=generator) - return torch.utils.data.Subset(dataset, perm[:max_samples].tolist()) - - -def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int, - num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader: - """Convenience function to load dataset into a dataloader with optional subset sampling. - - Args: - dataset: Dataset to load. - num_samples (Optional[int]): Number of samples to limit subset size. - batch_size (int): Batch size. - num_workers (int): Number of workers for data loading. - seed (int): Random seed. - """ - if num_samples is not None: - dataset = random_subset(dataset, num_samples, seed) - - dataloader = flashy.distrib.loader( - dataset, - batch_size=batch_size, - num_workers=num_workers, - **kwargs - ) - return dataloader - - -def get_dataset_from_loader(dataloader): - dataset = dataloader.dataset - if isinstance(dataset, torch.utils.data.Subset): - return dataset.dataset - else: - return dataset - - -def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None): - """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension. - - Args: - input (torch.Tensor): The input tensor containing probabilities. - num_samples (int): Number of samples to draw. - replacement (bool): Whether to draw with replacement or not. - Keywords args: - generator (torch.Generator): A pseudorandom number generator for sampling. - Returns: - torch.Tensor: Last dimension contains num_samples indices - sampled from the multinomial probability distribution - located in the last dimension of tensor input. 
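    Example (illustrative):
        >>> probs = torch.softmax(torch.randn(2, 3, 10), dim=-1)
        >>> multinomial(probs, num_samples=4, replacement=True).shape
        torch.Size([2, 3, 4])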
- """ - input_ = input.reshape(-1, input.shape[-1]) - output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator) - output = output_.reshape(*list(input.shape[:-1]), -1) - return output - - -def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor: - """Sample next token from top K values along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - k (int): The k in “top-k”. - Returns: - torch.Tensor: Sampled tokens. - """ - top_k_value, _ = torch.topk(probs, k, dim=-1) - min_value_top_k = top_k_value[..., [-1]] - probs *= (probs >= min_value_top_k).float() - probs.div_(probs.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs, num_samples=1) - return next_token - - -def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor: - """Sample next token from top P probabilities along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - p (int): The p in “top-p”. - Returns: - torch.Tensor: Sampled tokens. - """ - probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True) - probs_sum = torch.cumsum(probs_sort, dim=-1) - mask = probs_sum - probs_sort > p - probs_sort *= (~mask).float() - probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs_sort, num_samples=1) - next_token = torch.gather(probs_idx, -1, next_token) - return next_token - - -class DummyPoolExecutor: - """Dummy pool executor to use when we actually have only 1 worker. - (e.g. instead of ProcessPoolExecutor). - """ - class DummyResult: - def __init__(self, func, *args, **kwargs): - self.func = func - self.args = args - self.kwargs = kwargs - - def result(self): - return self.func(*self.args, **self.kwargs) - - def __init__(self, workers, mp_context=None): - pass - - def submit(self, func, *args, **kwargs): - return DummyPoolExecutor.DummyResult(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - return - - -def get_pool_executor(num_workers: int, mp_context=None): - return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1) - - -def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor: - """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences). - For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] - - Args: - lengths (torch.Tensor): tensor with lengths - max_len (int): can set the max length manually. Defaults to None. - Returns: - torch.Tensor: mask with 0s where there is pad tokens else 1s - """ - assert len(lengths.shape) == 1, "Length shape should be 1 dimensional." 
- final_length = lengths.max().item() if not max_len else max_len - final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor - return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None] - - -def hash_trick(word: str, vocab_size: int) -> int: - """Hash trick to pair each word with an index - - Args: - word (str): word we wish to convert to an index - vocab_size (int): size of the vocabulary - Returns: - int: index of the word in the embedding LUT - """ - hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16) - return hash % vocab_size - - -def with_rank_rng(base_seed: int = 1234): - """Decorator for a function so that the function will use a Random Number Generator - whose state depend on the GPU rank. The original RNG state is restored upon returning. - - Args: - base_seed (int): Random seed. - """ - def _decorator(fun: tp.Callable): - @wraps(fun) - def _decorated(*args, **kwargs): - state = torch.get_rng_state() - seed = base_seed ^ flashy.distrib.rank() - torch.manual_seed(seed) - logger.debug('Rank dependent seed set to %d', seed) - try: - return fun(*args, **kwargs) - finally: - torch.set_rng_state(state) - logger.debug('RNG state restored.') - return _decorated - return _decorator - - -def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get a list of tensors and collate them to a single tensor. according to the following logic: - - `dim` specifies the time dimension which will be stacked and padded. - - The output will contain 1 new dimension (dimension index 0) which will be the size of - of the original list. - - Args: - tensors (tp.List[torch.Tensor]): List of tensors to collate. - dim (int): Dimension which will be stacked and padded. - Returns: - tp.Tuple[torch.Tensor, torch.Tensor]: - torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension - (dimension index 0) which will be the size of the original list. - torch.Tensor: Tensor containing length of original tensor sizes (without padding). - """ - tensors = [x.transpose(0, dim) for x in tensors] - lens = torch.LongTensor([len(x) for x in tensors]) - padded_tensors = pad_sequence(tensors) - padded_tensors = padded_tensors.transpose(0, 1) - padded_tensors = padded_tensors.transpose(1, dim + 1) - return padded_tensors, lens diff --git a/spaces/XAI/VisualCorrespondenceHumanStudy/SessionState.py b/spaces/XAI/VisualCorrespondenceHumanStudy/SessionState.py deleted file mode 100644 index 48217b0f160349a91b9b3a0b50238a8dc851bb06..0000000000000000000000000000000000000000 --- a/spaces/XAI/VisualCorrespondenceHumanStudy/SessionState.py +++ /dev/null @@ -1,117 +0,0 @@ -"""Hack to add per-session state to Streamlit. - -Usage ------ - ->>> import SessionState ->>> ->>> session_state = SessionState.get(user_name='', favorite_color='black') ->>> session_state.user_name -'' ->>> session_state.user_name = 'Mary' ->>> session_state.favorite_color -'black' - -Since you set user_name above, next time your script runs this will be the -result: ->>> session_state = get(user_name='', favorite_color='black') ->>> session_state.user_name -'Mary' - -""" -try: - import streamlit.ReportThread as ReportThread - from streamlit.server.Server import Server -except Exception: - # Streamlit >= 0.65.0 - import streamlit.report_thread as ReportThread - from streamlit.server.server import Server - - -class SessionState(object): - def __init__(self, **kwargs): - """A new SessionState object. 
- - Parameters - ---------- - **kwargs : any - Default values for the session state. - - Example - ------- - >>> session_state = SessionState(user_name='', favorite_color='black') - >>> session_state.user_name = 'Mary' - '' - >>> session_state.favorite_color - 'black' - - """ - for key, val in kwargs.items(): - setattr(self, key, val) - - -def get(**kwargs): - """Gets a SessionState object for the current session. - - Creates a new object if necessary. - - Parameters - ---------- - **kwargs : any - Default values you want to add to the session state, if we're creating a - new one. - - Example - ------- - >>> session_state = get(user_name='', favorite_color='black') - >>> session_state.user_name - '' - >>> session_state.user_name = 'Mary' - >>> session_state.favorite_color - 'black' - - Since you set user_name above, next time your script runs this will be the - result: - >>> session_state = get(user_name='', favorite_color='black') - >>> session_state.user_name - 'Mary' - - """ - # Hack to get the session object from Streamlit. - - ctx = ReportThread.get_report_ctx() - - this_session = None - - current_server = Server.get_current() - if hasattr(current_server, '_session_infos'): - # Streamlit < 0.56 - session_infos = Server.get_current()._session_infos.values() - else: - session_infos = Server.get_current()._session_info_by_id.values() - - for session_info in session_infos: - s = session_info.session - if ( - # Streamlit < 0.54.0 - (hasattr(s, '_main_dg') and s._main_dg == ctx.main_dg) - or - # Streamlit >= 0.54.0 - (not hasattr(s, '_main_dg') and s.enqueue == ctx.enqueue) - or - # Streamlit >= 0.65.2 - (not hasattr(s, '_main_dg') and s._uploaded_file_mgr == ctx.uploaded_file_mgr) - ): - this_session = s - - if this_session is None: - raise RuntimeError( - "Oh noes. Couldn't get your Streamlit Session object. " - 'Are you doing something fancy with threads?') - - # Got the session object! Now let's attach some state into it. - - if not hasattr(this_session, '_custom_session_state'): - this_session._custom_session_state = SessionState(**kwargs) - - return this_session._custom_session_state diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/mem.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/mem.py deleted file mode 100644 index 41230ce3d14b2f7b7ff5f223e114b0791ec341c0..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/mem.py +++ /dev/null @@ -1,65 +0,0 @@ -" Memory profiling callbacks " - -import tracemalloc, threading, torch, time -from ..utils.mem import * -from ..basic_train import * -from ..torch_core import * -from ..utils.pynvml_gate import * - -if use_gpu: pynvml = load_pynvml_env() - -class PeakMemMetric(LearnerCallback): - "Callback that measures used and peaked general and GPU memory." 
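    # Typical fastai v1 usage (sketch; `data` and the architecture are
    # assumptions for illustration):
    #
    #     learn = cnn_learner(data, models.resnet34, metrics=[accuracy],
    #                         callback_fns=PeakMemMetric)
    #     learn.fit_one_cycle(1)
    #
    # Each epoch then reports four extra columns: cpu used/peak and
    # gpu used/peak, as MB deltas over the epoch.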
diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/mem.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/mem.py deleted file mode 100644 index 41230ce3d14b2f7b7ff5f223e114b0791ec341c0..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/callbacks/mem.py +++ /dev/null @@ -1,65 +0,0 @@ -" Memory profiling callbacks " - -import tracemalloc, threading, torch, time -from ..utils.mem import * -from ..basic_train import * -from ..torch_core import * -from ..utils.pynvml_gate import * - -if use_gpu: pynvml = load_pynvml_env() - -class PeakMemMetric(LearnerCallback): - "Callback that measures used and peak general (RAM) and GPU memory." - - _order=-20 # Needs to run before the recorder - - def __init__(self, learn:Learner): - super().__init__(learn) - assert torch.cuda.is_available(), "pytorch CUDA is required" - preload_pytorch() - - def peak_monitor_start(self): - self.peak_monitoring = True - - # start RAM tracing - tracemalloc.start() - - # this thread samples GPU memory usage as long as the current epoch of the fit loop is running - peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) - peak_monitor_thread.daemon = True - peak_monitor_thread.start() - - def peak_monitor_stop(self): - tracemalloc.stop() - self.peak_monitoring = False - - def peak_monitor_func(self): - self.gpu_mem_used_peak = -1 - - gpu_id = torch.cuda.current_device() - gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id) - - while True: - gpu_mem_used = gpu_mem_get_used_fast(gpu_handle) - self.gpu_mem_used_peak = max(gpu_mem_used, self.gpu_mem_used_peak) - if not self.peak_monitoring: break - time.sleep(0.001) # 1msec - - def on_train_begin(self, **kwargs): - self.learn.recorder.add_metric_names(['cpu used', 'peak', 'gpu used', 'peak']) - - def on_epoch_begin(self, **kwargs): - self.peak_monitor_start() - self.gpu_before = gpu_mem_get_used_no_cache() - - def on_epoch_end(self, last_metrics, **kwargs): - cpu_used, cpu_peak = [int(x / 2**20) for x in tracemalloc.get_traced_memory()] # bytes -> MBs - self.peak_monitor_stop() - gpu_used = gpu_mem_get_used_no_cache() - self.gpu_before - gpu_peak = self.gpu_mem_used_peak - self.gpu_before - # can be negative, due to the unreliable peak monitor thread - if gpu_peak < 0: gpu_peak = 0 - # since we want the overhead only, subtract the delta used if it's positive - elif gpu_used > 0: gpu_peak -= gpu_used - # The numbers are deltas in MBs between the beginning and the end of the epoch - return add_metrics(last_metrics, [cpu_used, cpu_peak, gpu_used, gpu_peak])
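For reference, a sketch of how this callback was typically wired up in fastai v1 via the `callback_fns` hook; `data` stands in for a previously built `DataBunch` and is not defined here:

```python
from fastai.vision import cnn_learner, models
from fastai.callbacks.mem import PeakMemMetric

# `data` is a placeholder for an existing fastai v1 DataBunch on a CUDA machine.
learn = cnn_learner(data, models.resnet18, callback_fns=PeakMemMetric)
learn.fit(1)  # the recorder now reports per-epoch cpu/gpu used and peak deltas, in MBs
```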
diff --git a/spaces/Xenova/semantic-image-search-client/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js b/spaces/Xenova/semantic-image-search-client/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js deleted file mode 100644 index 6c105cac9df378f58bc8f459abc733e6933e27d4..0000000000000000000000000000000000000000 --- a/spaces/Xenova/semantic-image-search-client/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js +++ /dev/null @@ -1 +0,0 @@ -[single-line minified Next.js polyfills bundle (core-js ES shims, URL/URLSearchParams, Promise, fetch); generated build artifact, contents omitted]
Rl?t.forEach(function(t,e){this.append(e,t)},this):Array.isArray(t)?t.forEach(function(t){this.append(t[0],t[1])},this):t&&Object.getOwnPropertyNames(t).forEach(function(e){this.append(e,t[e])},this)}function jl(t){if(t.bodyUsed)return Promise.reject(new TypeError("Already read"));t.bodyUsed=!0}function Pl(t){return new Promise(function(e,r){t.onload=function(){e(t.result)},t.onerror=function(){r(t.error)}})}function Il(t){var e=new FileReader,r=Pl(e);return e.readAsArrayBuffer(t),r}function Tl(t){if(t.slice)return t.slice(0);var e=new Uint8Array(t.byteLength);return e.set(new Uint8Array(t)),e.buffer}function kl(){return this.bodyUsed=!1,this._initBody=function(t){var e;this._bodyInit=t,t?"string"==typeof t?this._bodyText=t:ml&&Blob.prototype.isPrototypeOf(t)?this._bodyBlob=t:bl&&FormData.prototype.isPrototypeOf(t)?this._bodyFormData=t:gl&&URLSearchParams.prototype.isPrototypeOf(t)?this._bodyText=t.toString():wl&&ml&&(e=t)&&DataView.prototype.isPrototypeOf(e)?(this._bodyArrayBuffer=Tl(t.buffer),this._bodyInit=new Blob([this._bodyArrayBuffer])):wl&&(ArrayBuffer.prototype.isPrototypeOf(t)||El(t))?this._bodyArrayBuffer=Tl(t):this._bodyText=t=Object.prototype.toString.call(t):this._bodyText="",this.headers.get("content-type")||("string"==typeof t?this.headers.set("content-type","text/plain;charset=UTF-8"):this._bodyBlob&&this._bodyBlob.type?this.headers.set("content-type",this._bodyBlob.type):gl&&URLSearchParams.prototype.isPrototypeOf(t)&&this.headers.set("content-type","application/x-www-form-urlencoded;charset=UTF-8"))},ml&&(this.blob=function(){var t=jl(this);if(t)return t;if(this._bodyBlob)return Promise.resolve(this._bodyBlob);if(this._bodyArrayBuffer)return Promise.resolve(new Blob([this._bodyArrayBuffer]));if(this._bodyFormData)throw new Error("could not read FormData body as blob");return Promise.resolve(new Blob([this._bodyText]))},this.arrayBuffer=function(){return this._bodyArrayBuffer?jl(this)||Promise.resolve(this._bodyArrayBuffer):this.blob().then(Il)}),this.text=function(){var t=jl(this);if(t)return t;if(this._bodyBlob)return function(t){var e=new FileReader,r=Pl(e);return e.readAsText(t),r}(this._bodyBlob);if(this._bodyArrayBuffer)return Promise.resolve(function(t){for(var e=new Uint8Array(t),r=new Array(e.length),n=0;n-1?n:r),this.mode=e.mode||this.mode||null,this.signal=e.signal||this.signal,this.referrer=null,("GET"===this.method||"HEAD"===this.method)&&o)throw new TypeError("Body not allowed for GET or HEAD requests");this._initBody(o)}function Ml(t){var e=new FormData;return t.trim().split("&").forEach(function(t){if(t){var r=t.split("="),n=r.shift().replace(/\+/g," "),o=r.join("=").replace(/\+/g," ");e.append(decodeURIComponent(n),decodeURIComponent(o))}}),e}function _l(t,e){e||(e={}),this.type="default",this.status=void 0===e.status?200:e.status,this.ok=this.status>=200&&this.status<300,this.statusText="statusText"in e?e.statusText:"OK",this.headers=new Rl(e.headers),this.url=e.url||"",this._initBody(t)}Ul.prototype.clone=function(){return new Ul(this,{body:this._bodyInit})},kl.call(Ul.prototype),kl.call(_l.prototype),_l.prototype.clone=function(){return new _l(this._bodyInit,{status:this.status,statusText:this.statusText,headers:new Rl(this.headers),url:this.url})},_l.error=function(){var t=new _l(null,{status:0,statusText:""});return t.type="error",t};var Nl=[301,302,303,307,308];_l.redirect=function(t,e){if(-1===Nl.indexOf(e))throw new RangeError("Invalid status code");return new _l(null,{status:e,headers:{location:t}})};var Cl=self.DOMException;try{new 
Cl}catch(t){(Cl=function(t,e){this.message=t,this.name=e;var r=Error(t);this.stack=r.stack}).prototype=Object.create(Error.prototype),Cl.prototype.constructor=Cl}function Fl(t,e){return new Promise(function(r,n){var o=new Ul(t,e);if(o.signal&&o.signal.aborted)return n(new Cl("Aborted","AbortError"));var i=new XMLHttpRequest;function a(){i.abort()}i.onload=function(){var t,e,n={status:i.status,statusText:i.statusText,headers:(t=i.getAllResponseHeaders()||"",e=new Rl,t.replace(/\r?\n[\t ]+/g," ").split(/\r?\n/).forEach(function(t){var r=t.split(":"),n=r.shift().trim();if(n){var o=r.join(":").trim();e.append(n,o)}}),e)};n.url="responseURL"in i?i.responseURL:n.headers.get("X-Request-URL"),r(new _l("response"in i?i.response:i.responseText,n))},i.onerror=function(){n(new TypeError("Network request failed"))},i.ontimeout=function(){n(new TypeError("Network request failed"))},i.onabort=function(){n(new Cl("Aborted","AbortError"))},i.open(o.method,o.url,!0),"include"===o.credentials?i.withCredentials=!0:"omit"===o.credentials&&(i.withCredentials=!1),"responseType"in i&&ml&&(i.responseType="blob"),o.headers.forEach(function(t,e){i.setRequestHeader(e,t)}),o.signal&&(o.signal.addEventListener("abort",a),i.onreadystatechange=function(){4===i.readyState&&o.signal.removeEventListener("abort",a)}),i.send(void 0===o._bodyInit?null:o._bodyInit)})}Fl.polyfill=!0,self.fetch||(self.fetch=Fl,self.Headers=Rl,self.Request=Ul,self.Response=_l);var Bl=Object.getOwnPropertySymbols,Dl=Object.prototype.hasOwnProperty,ql=Object.prototype.propertyIsEnumerable,zl=function(){try{if(!Object.assign)return!1;var t=new String("abc");if(t[5]="de","5"===Object.getOwnPropertyNames(t)[0])return!1;for(var e={},r=0;r<10;r++)e["_"+String.fromCharCode(r)]=r;if("0123456789"!==Object.getOwnPropertyNames(e).map(function(t){return e[t]}).join(""))return!1;var n={};return"abcdefghijklmnopqrst".split("").forEach(function(t){n[t]=t}),"abcdefghijklmnopqrst"===Object.keys(Object.assign({},n)).join("")}catch(t){return!1}}()?Object.assign:function(t,e){for(var r,n,o=function(t){if(null==t)throw new TypeError("Object.assign cannot be called with null or undefined");return Object(t)}(t),i=1;i 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - 
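- # attn holds the hard monotonic alignment between text tokens and spectrogram frames found by maximum_path; summing it over the frame axis yields the per-token durations w that supervise the duration predictor above. The same attn is reused below to expand the text-side prior (m_p, logs_p) to frame resolution.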
- # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers must be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/Yan233th/so-vits-svc-models/flask_api.py b/spaces/Yan233th/so-vits-svc-models/flask_api.py deleted file mode 100644 index 8cc236a1c34c9ddeddea99bcea13024fb0ccc90b..0000000000000000000000000000000000000000 --- a/spaces/Yan233th/so-vits-svc-models/flask_api.py +++ /dev/null @@ -1,56 +0,0 @@ -import io -import logging - -import soundfile -import torch -import torchaudio -from flask import Flask, request, send_file -from flask_cors import CORS - -from inference.infer_tool import Svc, RealTimeVC - -app = Flask(__name__) - -CORS(app) - -logging.getLogger('numba').setLevel(logging.WARNING) - - -@app.route("/voiceChangeModel", methods=["POST"]) -def voice_change_model(): - request_form = request.form - wave_file = request.files.get("sample", None) - # Pitch-shift (transpose) amount - f_pitch_change = float(request_form.get("fPitchChange", 0)) - # Sample rate required by the DAW - daw_sample = int(float(request_form.get("sampleRate", 0))) - speaker_id = int(float(request_form.get("sSpeakId", 0))) - # Read the wav file from the HTTP request and wrap it for inference - input_wav_path = io.BytesIO(wave_file.read()) - - # Model inference - if raw_infer: - out_audio, out_sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path) - tar_audio = torchaudio.functional.resample(out_audio, svc_model.target_sample, daw_sample) - else: - out_audio = svc.process(svc_model, speaker_id, f_pitch_change, input_wav_path) - tar_audio = torchaudio.functional.resample(torch.from_numpy(out_audio), svc_model.target_sample, daw_sample) - # Return the audio - out_wav_path = io.BytesIO() - soundfile.write(out_wav_path,
tar_audio.cpu().numpy(), daw_sample, format="wav") - out_wav_path.seek(0) - return send_file(out_wav_path, download_name="temp.wav", as_attachment=True) - - -if __name__ == '__main__': - # If enabled, synthesis slices the input directly; False uses cross-fading instead - # Setting the VST plugin slice time to 0.3-0.5 s lowers latency; direct slicing can pop at the seams, while cross-fading adds a slight overlap in the audio - # Pick whichever trade-off is acceptable, or raise the VST maximum slice time to 1 s; it is set to True here, which costs latency but keeps the audio quality more stable - raw_infer = True - # Each model corresponds to exactly one config - model_name = "logs/32k/G_174000-Copy1.pth" - config_name = "configs/config.json" - svc_model = Svc(model_name, config_name) - svc = RealTimeVC() - # This matches the VST plugin settings; changing it is not recommended - app.run(port=6842, host="0.0.0.0", debug=False, threaded=False) diff --git a/spaces/abdvl/datahub_qa_bot/docs/architecture/metadata-ingestion.md b/spaces/abdvl/datahub_qa_bot/docs/architecture/metadata-ingestion.md deleted file mode 100644 index 2b60383319c684933ca334157e9d6a0a36d9ae6b..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/architecture/metadata-ingestion.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: "Ingestion Framework" --- - -# Metadata Ingestion Architecture - -DataHub supports an extremely flexible ingestion architecture that can support push, pull, asynchronous and synchronous models. -The figure below describes all the options possible for connecting your favorite system to DataHub. -![Ingestion Architecture](../imgs/ingestion-architecture.png) - -## Metadata Change Proposal: The Centerpiece - -The centerpiece of ingestion is the [Metadata Change Proposal], which represents a request to make a metadata change to an organization's Metadata Graph. -Metadata Change Proposals can be sent over Kafka, for highly scalable async publishing from source systems. They can also be sent directly to the HTTP endpoint exposed by the DataHub service tier to get synchronous success / failure responses. - -## Pull-based Integration - -DataHub ships with a Python-based [metadata-ingestion system](../../metadata-ingestion/README.md) that can connect to different sources to pull metadata from them. This metadata is then pushed via Kafka or HTTP to the DataHub storage tier. Metadata ingestion pipelines can be [integrated with Airflow](../../metadata-ingestion/README.md#lineage-with-airflow) to set up scheduled ingestion or capture lineage. If you don't find a source already supported, it is very easy to [write your own](../../metadata-ingestion/README.md#contributing). - -## Push-based Integration - -As long as you can emit a [Metadata Change Proposal (MCP)] event to Kafka or make a REST call over HTTP, you can integrate any system with DataHub. For convenience, DataHub also provides simple [Python emitters] for you to integrate into your systems to emit metadata changes (MCPs) at the point of origin. - -## Internal Components - -### Applying Metadata Change Proposals to DataHub Metadata Service (mce-consumer-job) - -DataHub comes with a Spring job, [mce-consumer-job], which consumes the Metadata Change Proposals and writes them into the DataHub Metadata Service (datahub-gms) using the `/ingest` endpoint.
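To make the push-based path concrete, here is a minimal sketch that emits a single MCP over HTTP with the `acryl-datahub` Python library; the gms address, dataset URN, and description below are placeholder values, and the exact wrapper signature may differ between library versions:

```python
# Minimal sketch: emit one Metadata Change Proposal to datahub-gms over REST.
# The server URL and dataset URN are illustrative placeholders.
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.metadata.schema_classes import DatasetPropertiesClass

emitter = DatahubRestEmitter(gms_server="http://localhost:8080")

mcp = MetadataChangeProposalWrapper(
    entityUrn="urn:li:dataset:(urn:li:dataPlatform:hive,example_db.example_table,PROD)",
    aspect=DatasetPropertiesClass(description="An example table"),
)

# Synchronous REST call: success or failure is known immediately.
emitter.emit(mcp)
```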
- -[Metadata Change Proposal (MCP)]: ../what/mxe.md#metadata-change-proposal-mcp -[Metadata Change Proposal]: ../what/mxe.md#metadata-change-proposal-mcp -[Metadata Change Log (MCL)]: ../what/mxe.md#metadata-change-log-mcl -[equivalent Pegasus format]: https://linkedin.github.io/rest.li/how_data_is_represented_in_memory#the-data-template-layer -[mce-consumer-job]: ../../metadata-jobs/mce-consumer-job -[Python emitters]: ../../metadata-ingestion/README.md#using-as-a-library - diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/parallel/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/parallel/__init__.py deleted file mode 100644 index 2ed2c17ad357742e423beeaf4d35db03fe9af469..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/parallel/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .collate import collate -from .data_container import DataContainer -from .data_parallel import MMDataParallel -from .distributed import MMDistributedDataParallel -from .registry import MODULE_WRAPPERS -from .scatter_gather import scatter, scatter_kwargs -from .utils import is_module_wrapper - -__all__ = [ - 'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel', - 'scatter', 'scatter_kwargs', 'is_module_wrapper', 'MODULE_WRAPPERS' -] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/transforms.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/transforms.py deleted file mode 100644 index df55b0a496516bf7373fe96cf746c561dd713c3b..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/transforms.py +++ /dev/null @@ -1,240 +0,0 @@ -import numpy as np -import torch - - -def bbox_flip(bboxes, img_shape, direction='horizontal'): - """Flip bboxes horizontally or vertically. - - Args: - bboxes (Tensor): Shape (..., 4*k) - img_shape (tuple): Image shape. - direction (str): Flip direction, options are "horizontal", "vertical", - "diagonal". Default: "horizontal" - - Returns: - Tensor: Flipped bboxes. 
- """ - assert bboxes.shape[-1] % 4 == 0 - assert direction in ['horizontal', 'vertical', 'diagonal'] - flipped = bboxes.clone() - if direction == 'horizontal': - flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] - flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] - elif direction == 'vertical': - flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] - flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] - else: - flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] - flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] - flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] - flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] - return flipped - - -def bbox_mapping(bboxes, - img_shape, - scale_factor, - flip, - flip_direction='horizontal'): - """Map bboxes from the original image scale to testing scale.""" - new_bboxes = bboxes * bboxes.new_tensor(scale_factor) - if flip: - new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) - return new_bboxes - - -def bbox_mapping_back(bboxes, - img_shape, - scale_factor, - flip, - flip_direction='horizontal'): - """Map bboxes from testing scale to original image scale.""" - new_bboxes = bbox_flip(bboxes, img_shape, - flip_direction) if flip else bboxes - new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) - return new_bboxes.view(bboxes.shape) - - -def bbox2roi(bbox_list): - """Convert a list of bboxes to roi format. - - Args: - bbox_list (list[Tensor]): a list of bboxes corresponding to a batch - of images. - - Returns: - Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] - """ - rois_list = [] - for img_id, bboxes in enumerate(bbox_list): - if bboxes.size(0) > 0: - img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) - rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) - else: - rois = bboxes.new_zeros((0, 5)) - rois_list.append(rois) - rois = torch.cat(rois_list, 0) - return rois - - -def roi2bbox(rois): - """Convert rois to bounding box format. - - Args: - rois (torch.Tensor): RoIs with the shape (n, 5) where the first - column indicates batch id of each RoI. - - Returns: - list[torch.Tensor]: Converted boxes of corresponding rois. - """ - bbox_list = [] - img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) - for img_id in img_ids: - inds = (rois[:, 0] == img_id.item()) - bbox = rois[inds, 1:] - bbox_list.append(bbox) - return bbox_list - - -def bbox2result(bboxes, labels, num_classes): - """Convert detection results to a list of numpy arrays. - - Args: - bboxes (torch.Tensor | np.ndarray): shape (n, 5) - labels (torch.Tensor | np.ndarray): shape (n, ) - num_classes (int): class number, including background class - - Returns: - list(ndarray): bbox results of each class - """ - if bboxes.shape[0] == 0: - return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] - else: - if isinstance(bboxes, torch.Tensor): - bboxes = bboxes.detach().cpu().numpy() - labels = labels.detach().cpu().numpy() - return [bboxes[labels == i, :] for i in range(num_classes)] - - -def distance2bbox(points, distance, max_shape=None): - """Decode distance prediction to bounding box. - - Args: - points (Tensor): Shape (B, N, 2) or (N, 2). - distance (Tensor): Distance from the given point to 4 - boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) - max_shape (Sequence[int] or torch.Tensor or Sequence[ - Sequence[int]],optional): Maximum bounds for boxes, specifies - (H, W, C) or (H, W). 
If priors shape is (B, N, 4), then - the max_shape should be a Sequence[Sequence[int]] - and the length of max_shape should also be B. - - Returns: - Tensor: Boxes with shape (N, 4) or (B, N, 4) - """ - x1 = points[..., 0] - distance[..., 0] - y1 = points[..., 1] - distance[..., 1] - x2 = points[..., 0] + distance[..., 2] - y2 = points[..., 1] + distance[..., 3] - - bboxes = torch.stack([x1, y1, x2, y2], -1) - - if max_shape is not None: - if not isinstance(max_shape, torch.Tensor): - max_shape = x1.new_tensor(max_shape) - max_shape = max_shape[..., :2].type_as(x1) - if max_shape.ndim == 2: - assert bboxes.ndim == 3 - assert max_shape.size(0) == bboxes.size(0) - - min_xy = x1.new_tensor(0) - max_xy = torch.cat([max_shape, max_shape], - dim=-1).flip(-1).unsqueeze(-2) - bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) - bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) - - return bboxes - - -def bbox2distance(points, bbox, max_dis=None, eps=0.1): - """Encode a bounding box as distances from the given points to its four boundaries. - - Args: - points (Tensor): Shape (n, 2), [x, y]. - bbox (Tensor): Shape (n, 4), "xyxy" format - max_dis (float): Upper bound of the distance. - eps (float): a small value to ensure target < max_dis rather than <=. - - Returns: - Tensor: Encoded distances. - """ - left = points[:, 0] - bbox[:, 0] - top = points[:, 1] - bbox[:, 1] - right = bbox[:, 2] - points[:, 0] - bottom = bbox[:, 3] - points[:, 1] - if max_dis is not None: - left = left.clamp(min=0, max=max_dis - eps) - top = top.clamp(min=0, max=max_dis - eps) - right = right.clamp(min=0, max=max_dis - eps) - bottom = bottom.clamp(min=0, max=max_dis - eps) - return torch.stack([left, top, right, bottom], -1) - - -def bbox_rescale(bboxes, scale_factor=1.0): - """Rescale bounding box w.r.t. scale_factor. - - Args: - bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois - scale_factor (float): rescale factor - - Returns: - Tensor: Rescaled bboxes. - """ - if bboxes.size(1) == 5: - bboxes_ = bboxes[:, 1:] - inds_ = bboxes[:, 0] - else: - bboxes_ = bboxes - cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5 - cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5 - w = bboxes_[:, 2] - bboxes_[:, 0] - h = bboxes_[:, 3] - bboxes_[:, 1] - w = w * scale_factor - h = h * scale_factor - x1 = cx - 0.5 * w - x2 = cx + 0.5 * w - y1 = cy - 0.5 * h - y2 = cy + 0.5 * h - if bboxes.size(1) == 5: - rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1) - else: - rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) - return rescaled_bboxes - - -def bbox_cxcywh_to_xyxy(bbox): - """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2). - - Args: - bbox (Tensor): Shape (n, 4) for bboxes. - - Returns: - Tensor: Converted bboxes. - """ - cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1) - bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)] - return torch.cat(bbox_new, dim=-1) - - -def bbox_xyxy_to_cxcywh(bbox): - """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h). - - Args: - bbox (Tensor): Shape (n, 4) for bboxes. - - Returns: - Tensor: Converted bboxes.
- """ - x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) - bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] - return torch.cat(bbox_new, dim=-1) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/utils/misc.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/utils/misc.py deleted file mode 100644 index 3e22c7b9085317b61a25c67d361f7e70df65bed1..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/utils/misc.py +++ /dev/null @@ -1,61 +0,0 @@ -from functools import partial - -import numpy as np -import torch -from six.moves import map, zip - -from ..mask.structures import BitmapMasks, PolygonMasks - - -def multi_apply(func, *args, **kwargs): - """Apply function to a list of arguments. - - Note: - This function applies the ``func`` to multiple inputs and - map the multiple outputs of the ``func`` into different - list. Each list contains the same type of outputs corresponding - to different inputs. - - Args: - func (Function): A function that will be applied to a list of - arguments - - Returns: - tuple(list): A tuple containing multiple list, each list contains \ - a kind of returned results by the function - """ - pfunc = partial(func, **kwargs) if kwargs else func - map_results = map(pfunc, *args) - return tuple(map(list, zip(*map_results))) - - -def unmap(data, count, inds, fill=0): - """Unmap a subset of item (data) back to the original set of items (of size - count)""" - if data.dim() == 1: - ret = data.new_full((count, ), fill) - ret[inds.type(torch.bool)] = data - else: - new_size = (count, ) + data.size()[1:] - ret = data.new_full(new_size, fill) - ret[inds.type(torch.bool), :] = data - return ret - - -def mask2ndarray(mask): - """Convert Mask to ndarray.. - - Args: - mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or - torch.Tensor or np.ndarray): The mask to be converted. - - Returns: - np.ndarray: Ndarray mask of shape (n, h, w) that has been converted - """ - if isinstance(mask, (BitmapMasks, PolygonMasks)): - mask = mask.to_ndarray() - elif isinstance(mask, torch.Tensor): - mask = mask.detach().cpu().numpy() - elif not isinstance(mask, np.ndarray): - raise TypeError(f'Unsupported {type(mask)} data type') - return mask diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/backbones/res2net.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/backbones/res2net.py deleted file mode 100644 index 7901b7f2fa29741d72328bdbdbf92fc4d5c5f847..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/backbones/res2net.py +++ /dev/null @@ -1,351 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, - kaiming_init) -from mmcv.runner import load_checkpoint -from torch.nn.modules.batchnorm import _BatchNorm - -from mmdet.utils import get_root_logger -from ..builder import BACKBONES -from .resnet import Bottleneck as _Bottleneck -from .resnet import ResNet - - -class Bottle2neck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - scales=4, - base_width=26, - base_channels=64, - stage_type='normal', - **kwargs): - """Bottle2neck block for Res2Net. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. 
- """ - super(Bottle2neck, self).__init__(inplanes, planes, **kwargs) - assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' - width = int(math.floor(self.planes * (base_width / base_channels))) - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width * scales, postfix=1) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width * scales, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - - if stage_type == 'stage' and self.conv2_stride != 1: - self.pool = nn.AvgPool2d( - kernel_size=3, stride=self.conv2_stride, padding=1) - convs = [] - bns = [] - - fallback_on_stride = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if not self.with_dcn or fallback_on_stride: - for i in range(scales - 1): - convs.append( - build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - bias=False)) - bns.append( - build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - for i in range(scales - 1): - convs.append( - build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - bias=False)) - bns.append( - build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - - self.conv3 = build_conv_layer( - self.conv_cfg, - width * scales, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - self.stage_type = stage_type - self.scales = scales - self.width = width - delattr(self, 'conv2') - delattr(self, self.norm2_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv1_plugin_names) - - spx = torch.split(out, self.width, 1) - sp = self.convs[0](spx[0].contiguous()) - sp = self.relu(self.bns[0](sp)) - out = sp - for i in range(1, self.scales - 1): - if self.stage_type == 'stage': - sp = spx[i] - else: - sp = sp + spx[i] - sp = self.convs[i](sp.contiguous()) - sp = self.relu(self.bns[i](sp)) - out = torch.cat((out, sp), 1) - - if self.stage_type == 'normal' or self.conv2_stride == 1: - out = torch.cat((out, spx[self.scales - 1]), 1) - elif self.stage_type == 'stage': - out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -class Res2Layer(nn.Sequential): - """Res2Layer to build Res2Net style backbone. - - Args: - block (nn.Module): block used to build ResLayer. - inplanes (int): inplanes of block. - planes (int): planes of block. - num_blocks (int): number of blocks. 
- stride (int): stride of the first block. Default: 1 - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottle2neck. Default: False - conv_cfg (dict): dictionary to construct and config conv layer. - Default: None - norm_cfg (dict): dictionary to construct and config norm layer. - Default: dict(type='BN') - scales (int): Scales used in Res2Net. Default: 4 - base_width (int): Basic width of each scale. Default: 26 - """ - - def __init__(self, - block, - inplanes, - planes, - num_blocks, - stride=1, - avg_down=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - scales=4, - base_width=26, - **kwargs): - self.block = block - - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.AvgPool2d( - kernel_size=stride, - stride=stride, - ceil_mode=True, - count_include_pad=False), - build_conv_layer( - conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=1, - bias=False), - build_norm_layer(norm_cfg, planes * block.expansion)[1], - ) - - layers = [] - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=stride, - downsample=downsample, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - scales=scales, - base_width=base_width, - stage_type='stage', - **kwargs)) - inplanes = planes * block.expansion - for i in range(1, num_blocks): - layers.append( - block( - inplanes=inplanes, - planes=planes, - stride=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - scales=scales, - base_width=base_width, - **kwargs)) - super(Res2Layer, self).__init__(*layers) - - -@BACKBONES.register_module() -class Res2Net(ResNet): - """Res2Net backbone. - - Args: - scales (int): Scales used in Res2Net. Default: 4 - base_width (int): Basic width of each scale. Default: 26 - depth (int): Depth of res2net, from {50, 101, 152}. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Res2net stages. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottle2neck. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - norm_cfg (dict): Dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - plugins (list[dict]): List of plugins for stages, each dict contains: - - - cfg (dict, required): Cfg dict to build plugin. - - position (str, required): Position inside block to insert - plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - - stages (tuple[bool], optional): Stages to apply plugin, length - should be same as 'num_stages'. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. 
- - Example: - >>> from mmdet.models import Res2Net - >>> import torch - >>> self = Res2Net(depth=50, scales=4, base_width=26) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 256, 8, 8) - (1, 512, 4, 4) - (1, 1024, 2, 2) - (1, 2048, 1, 1) - """ - - arch_settings = { - 50: (Bottle2neck, (3, 4, 6, 3)), - 101: (Bottle2neck, (3, 4, 23, 3)), - 152: (Bottle2neck, (3, 8, 36, 3)) - } - - def __init__(self, - scales=4, - base_width=26, - style='pytorch', - deep_stem=True, - avg_down=True, - **kwargs): - self.scales = scales - self.base_width = base_width - super(Res2Net, self).__init__( - style='pytorch', deep_stem=True, avg_down=True, **kwargs) - - def make_res_layer(self, **kwargs): - return Res2Layer( - scales=self.scales, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - - if self.dcn is not None: - for m in self.modules(): - if isinstance(m, Bottle2neck): - # dcn in Res2Net bottle2neck is in ModuleList - for n in m.convs: - if hasattr(n, 'conv_offset'): - constant_init(n.conv_offset, 0) - - if self.zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottle2neck): - constant_init(m.norm3, 0) - else: - raise TypeError('pretrained must be a str or None') diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/info.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/info.py deleted file mode 100644 index 7625b87f9e2f4c708e5f70a20c9dea4e0ce16a62..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/info.py +++ /dev/null @@ -1,203 +0,0 @@ -"""Get environment information useful for debugging. 
- -Intended usage is to create a file for bug reports, e.g.:: - - python -m pyglet.info > info.txt - -""" - -_first_heading = True - - -def _heading(heading): - global _first_heading - if not _first_heading: - print() - else: - _first_heading = False - print(heading) - print('-' * 78) - - -def dump_platform(): - """Dump OS-specific platform information.""" - import platform - print('platform: ', platform.platform()) - print('release: ', platform.release()) - print('machine: ', platform.machine()) - - -def dump_python(): - """Dump Python version and environment to stdout.""" - import os - import sys - import platform - print('implementation:', platform.python_implementation()) - print('sys.version:', sys.version) - print('sys.maxsize:', sys.maxsize) - if sys.platform == 'darwin': - try: - from objc import __version__ as pyobjc_version - print('objc.__version__:', pyobjc_version) - except: - print('PyObjC not available') - print('os.getcwd():', os.getcwd()) - for key, value in os.environ.items(): - if key.startswith('PYGLET_'): - print(f"os.environ['{key}']: {value}") - - -def dump_pyglet(): - """Dump pyglet version and options.""" - import pyglet - print('pyglet.version:', pyglet.version) - print('pyglet.compat_platform:', pyglet.compat_platform) - print('pyglet.__file__:', pyglet.__file__) - for key, value in pyglet.options.items(): - print(f"pyglet.options['{key}'] = {value!r}") - - -def dump_window(): - """Dump display, window, screen and default config info.""" - from pyglet.gl import gl_info - if not gl_info.have_version(3): - print(f"Insufficient OpenGL version: {gl_info.get_version_string()}") - return - import pyglet.window - display = pyglet.canvas.get_display() - print('display:', repr(display)) - screens = display.get_screens() - for i, screen in enumerate(screens): - print(f'screens[{i}]: {screen!r}') - window = pyglet.window.Window(visible=False) - for key, value in window.config.get_gl_attributes(): - print(f"config['{key}'] = {value!r}") - print('context:', repr(window.context)) - - _heading('window.context._info') - dump_gl(window.context) - window.close() - - -def dump_gl(context=None): - """Dump GL info.""" - if context is not None: - info = context.get_info() - else: - from pyglet.gl import gl_info as info - print('gl_info.get_version():', info.get_version()) - print('gl_info.get_vendor():', info.get_vendor()) - print('gl_info.get_renderer():', info.get_renderer()) - print('gl_info.get_extensions():') - extensions = list(info.get_extensions()) - extensions.sort() - for name in extensions: - print(' ', name) - - -def dump_glx(): - """Dump GLX info.""" - try: - from pyglet.gl import glx_info - except: - print('GLX not available.') - return - import pyglet - window = pyglet.window.Window(visible=False) - print('context.is_direct():', window.context.is_direct()) - window.close() - - if not glx_info.have_version(1, 1): - print('Version: < 1.1') - else: - print('glx_info.get_server_vendor():', glx_info.get_server_vendor()) - print('glx_info.get_server_version():', glx_info.get_server_version()) - print('glx_info.get_server_extensions():') - for name in glx_info.get_server_extensions(): - print(' ', name) - print('glx_info.get_client_vendor():', glx_info.get_client_vendor()) - print('glx_info.get_client_version():', glx_info.get_client_version()) - print('glx_info.get_client_extensions():') - for name in glx_info.get_client_extensions(): - print(' ', name) - print('glx_info.get_extensions():') - for name in glx_info.get_extensions(): - print(' ', name) - - -def dump_media(): - """Dump pyglet.media
info.""" - import pyglet.media - print('audio driver:', pyglet.media.get_audio_driver()) - - -def dump_ffmpeg(): - """Dump FFmpeg info.""" - import pyglet - pyglet.options['search_local_libs'] = True - import pyglet.media - - if pyglet.media.have_ffmpeg(): - from pyglet.media.codecs.ffmpeg import get_version - print('FFmpeg version:', get_version()) - else: - print('FFmpeg not available.') - - -def dump_al(): - """Dump OpenAL info.""" - try: - from pyglet.media.drivers import openal - except: - print('OpenAL not available.') - return - print('Library:', openal.lib_openal._lib) - - driver = openal.create_audio_driver() - print('Version: {}.{}'.format(*driver.get_version())) - print('Extensions:') - for extension in driver.get_extensions(): - print(' ', extension) - - -def dump_wintab(): - """Dump WinTab info.""" - try: - from pyglet.input.win32 import wintab - except: - print('WinTab not available.') - return - - interface_name = wintab.get_interface_name() - impl_version = wintab.get_implementation_version() - spec_version = wintab.get_spec_version() - - print('WinTab: {0} {1}.{2} (Spec {3}.{4})'.format(interface_name, - impl_version >> 8, impl_version & 0xff, - spec_version >> 8, spec_version & 0xff)) - - -def _try_dump(heading, func): - _heading(heading) - try: - func() - except: - import traceback - traceback.print_exc() - - -def dump(): - """Dump all information to stdout.""" - _try_dump('Platform', dump_platform) - _try_dump('Python', dump_python) - _try_dump('pyglet', dump_pyglet) - _try_dump('pyglet.window', dump_window) - _try_dump('pyglet.gl.glx_info', dump_glx) - _try_dump('pyglet.media', dump_media) - _try_dump('pyglet.media.ffmpeg', dump_ffmpeg) - _try_dump('pyglet.media.drivers.openal', dump_al) - _try_dump('pyglet.input.wintab', dump_wintab) - - -if __name__ == '__main__': - dump() diff --git a/spaces/akhaliq/Mask2Former/mask2former/modeling/meta_arch/__init__.py b/spaces/akhaliq/Mask2Former/mask2former/modeling/meta_arch/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/mask2former/modeling/meta_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/test/test_doctests.py b/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/test/test_doctests.py deleted file mode 100644 index 49d2bfa6d32663cbe4223bf94346aadce247c6ea..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/test/test_doctests.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -""" -This file causes the doctests to be included as part of unit tests. - -To make sure the doctests of a specific module are included, -please replicate the `addTests` call for the iterators module below. 
-""" - -import doctest -import infinibatch.iterators - - -def load_tests(loader, tests, ignore): - tests.addTests(doctest.DocTestSuite(infinibatch.iterators)) - return tests diff --git a/spaces/akhaliq/Swin-Transformer/app.py b/spaces/akhaliq/Swin-Transformer/app.py deleted file mode 100644 index 0307c87d93f84578233376e3ea19e23a3aac5468..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Swin-Transformer/app.py +++ /dev/null @@ -1,32 +0,0 @@ -import gradio as gr - -title="Swin Transformer" -description="Gradio Demo for Swin Transformer: Hierarchical Vision Transformer using Shifted Windows. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below." - -article = "
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows | Github Repo
      " - - -io1 = gr.Interface.load("huggingface/microsoft/swin-large-patch4-window12-384-in22k") - -#io2 = gr.Interface.load("huggingface/microsoft/swin-base-patch4-window7-224-in22k") - - -def inference(image, model): - if model == "swin-large-patch4-window12-384-in22k": - outtext = io1(image) - else: - outtext = io2(image) - return outtext - - -examples=[['tiger.jpeg','swin-large-patch4-window12-384-in22k']] - -gr.Interface( - inference, - [gr.inputs.Image(label="Input Image",type='filepath'),gr.inputs.Dropdown(choices=["swin-large-patch4-window12-384-in22k"], type="value", default="swin-large-patch4-window12-384-in22k", label="model") -], - gr.outputs.Label(label="Classification"), - examples=examples, - article=article, - title=title, - description=description).launch(enable_queue=True,cache_examples=True) \ No newline at end of file diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/bin/normalize.py b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/bin/normalize.py deleted file mode 100644 index 53644fd5964299787ea1be39e6082627be32fbfd..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/bin/normalize.py +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""Normalize feature files and dump them.""" - -import argparse -import logging -import os - -import numpy as np -import yaml - -from sklearn.preprocessing import StandardScaler -from tqdm import tqdm - -from parallel_wavegan.datasets import AudioMelDataset -from parallel_wavegan.datasets import AudioMelSCPDataset -from parallel_wavegan.datasets import MelDataset -from parallel_wavegan.datasets import MelSCPDataset -from parallel_wavegan.utils import read_hdf5 -from parallel_wavegan.utils import write_hdf5 - - -def main(): - """Run preprocessing process.""" - parser = argparse.ArgumentParser( - description="Normalize dumped raw features (See detail in parallel_wavegan/bin/normalize.py)." - ) - parser.add_argument( - "--rootdir", - default=None, - type=str, - help="directory including feature files to be normalized. " - "you need to specify either *-scp or rootdir.", - ) - parser.add_argument( - "--wav-scp", - default=None, - type=str, - help="kaldi-style wav.scp file. " - "you need to specify either *-scp or rootdir.", - ) - parser.add_argument( - "--feats-scp", - default=None, - type=str, - help="kaldi-style feats.scp file. " - "you need to specify either *-scp or rootdir.", - ) - parser.add_argument( - "--segments", - default=None, - type=str, - help="kaldi-style segments file.", - ) - parser.add_argument( - "--dumpdir", - type=str, - required=True, - help="directory to dump normalized feature files.", - ) - parser.add_argument( - "--stats", - type=str, - required=True, - help="statistics file.", - ) - parser.add_argument( - "--skip-wav-copy", - default=False, - action="store_true", - help="whether to skip the copy of wav files.", - ) - parser.add_argument( - "--config", type=str, required=True, help="yaml format configuration file." - ) - parser.add_argument( - "--verbose", - type=int, - default=1, - help="logging level. higher is more logging. 
(default=1)", - ) - args = parser.parse_args() - - # set logger - if args.verbose > 1: - logging.basicConfig( - level=logging.DEBUG, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - elif args.verbose > 0: - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - else: - logging.basicConfig( - level=logging.WARN, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - logging.warning("Skip DEBUG/INFO messages") - - # load config - with open(args.config) as f: - config = yaml.load(f, Loader=yaml.Loader) - config.update(vars(args)) - - # check arguments - if (args.feats_scp is not None and args.rootdir is not None) or ( - args.feats_scp is None and args.rootdir is None - ): - raise ValueError("Please specify either --rootdir or --feats-scp.") - - # check directory existence - if not os.path.exists(args.dumpdir): - os.makedirs(args.dumpdir) - - # get dataset - if args.rootdir is not None: - if config["format"] == "hdf5": - audio_query, mel_query = "*.h5", "*.h5" - audio_load_fn = lambda x: read_hdf5(x, "wave") # NOQA - mel_load_fn = lambda x: read_hdf5(x, "feats") # NOQA - elif config["format"] == "npy": - audio_query, mel_query = "*-wave.npy", "*-feats.npy" - audio_load_fn = np.load - mel_load_fn = np.load - else: - raise ValueError("support only hdf5 or npy format.") - if not args.skip_wav_copy: - dataset = AudioMelDataset( - root_dir=args.rootdir, - audio_query=audio_query, - mel_query=mel_query, - audio_load_fn=audio_load_fn, - mel_load_fn=mel_load_fn, - return_utt_id=True, - ) - else: - dataset = MelDataset( - root_dir=args.rootdir, - mel_query=mel_query, - mel_load_fn=mel_load_fn, - return_utt_id=True, - ) - else: - if not args.skip_wav_copy: - dataset = AudioMelSCPDataset( - wav_scp=args.wav_scp, - feats_scp=args.feats_scp, - segments=args.segments, - return_utt_id=True, - ) - else: - dataset = MelSCPDataset( - feats_scp=args.feats_scp, - return_utt_id=True, - ) - logging.info(f"The number of files = {len(dataset)}.") - - # restore scaler - scaler = StandardScaler() - if config["format"] == "hdf5": - scaler.mean_ = read_hdf5(args.stats, "mean") - scaler.scale_ = read_hdf5(args.stats, "scale") - elif config["format"] == "npy": - scaler.mean_ = np.load(args.stats)[0] - scaler.scale_ = np.load(args.stats)[1] - else: - raise ValueError("support only hdf5 or npy format.") - # from version 0.23.0, this information is needed - scaler.n_features_in_ = scaler.mean_.shape[0] - - # process each file - for items in tqdm(dataset): - if not args.skip_wav_copy: - utt_id, audio, mel = items - else: - utt_id, mel = items - - # normalize - mel = scaler.transform(mel) - - # save - if config["format"] == "hdf5": - write_hdf5( - os.path.join(args.dumpdir, f"{utt_id}.h5"), - "feats", - mel.astype(np.float32), - ) - if not args.skip_wav_copy: - write_hdf5( - os.path.join(args.dumpdir, f"{utt_id}.h5"), - "wave", - audio.astype(np.float32), - ) - elif config["format"] == "npy": - np.save( - os.path.join(args.dumpdir, f"{utt_id}-feats.npy"), - mel.astype(np.float32), - allow_pickle=False, - ) - if not args.skip_wav_copy: - np.save( - os.path.join(args.dumpdir, f"{utt_id}-wave.npy"), - audio.astype(np.float32), - allow_pickle=False, - ) - else: - raise ValueError("support only hdf5 or npy format.") - - -if __name__ == "__main__": - main() diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/layers/tf_layers.py 
b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/layers/tf_layers.py deleted file mode 100644 index e06ffc013837e52596336ce8e45fcfedf78c0666..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/layers/tf_layers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2020 MINH ANH (@dathudeptrai) -# MIT License (https://opensource.org/licenses/MIT) - -"""Tensorflow Layer modules compatible with pytorch.""" - -import tensorflow as tf - - -class TFReflectionPad1d(tf.keras.layers.Layer): - """Tensorflow ReflectionPad1d module.""" - - def __init__(self, padding_size): - """Initialize TFReflectionPad1d module. - - Args: - padding_size (int): Padding size. - - """ - super(TFReflectionPad1d, self).__init__() - self.padding_size = padding_size - - @tf.function - def call(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input tensor (B, T, 1, C). - - Returns: - Tensor: Padded tensor (B, T + 2 * padding_size, 1, C). - - """ - return tf.pad( - x, - [[0, 0], [self.padding_size, self.padding_size], [0, 0], [0, 0]], - "REFLECT", - ) - - -class TFConvTranspose1d(tf.keras.layers.Layer): - """Tensorflow ConvTranspose1d module.""" - - def __init__(self, channels, kernel_size, stride, padding): - """Initialize TFConvTranspose1d module. - - Args: - channels (int): Number of channels. - kernel_size (int): Kernel size. - stride (int): Stride width. - padding (str): Padding type ("same" or "valid"). - - """ - super(TFConvTranspose1d, self).__init__() - self.conv1d_transpose = tf.keras.layers.Conv2DTranspose( - filters=channels, - kernel_size=(kernel_size, 1), - strides=(stride, 1), - padding=padding, - ) - - @tf.function - def call(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input tensor (B, T, 1, C). - - Returns: - Tensor: Output tensor (B, T', 1, C'). - - """ - x = self.conv1d_transpose(x) - return x - - -class TFResidualStack(tf.keras.layers.Layer): - """Tensorflow ResidualStack module.""" - - def __init__( - self, - kernel_size, - channels, - dilation, - bias, - nonlinear_activation, - nonlinear_activation_params, - padding, - ): - """Initialize TFResidualStack module. - - Args: - kernel_size (int): Kernel size. - channels (int): Number of channels. - dilation (int): Dilation size. - bias (bool): Whether to add bias parameter in convolution layers. - nonlinear_activation (str): Activation function module name. - nonlinear_activation_params (dict): Hyperparameters for activation function. - padding (str): Padding type ("same" or "valid"). - - """ - super(TFResidualStack, self).__init__() - self.block = [ - getattr(tf.keras.layers, nonlinear_activation)( - **nonlinear_activation_params - ), - TFReflectionPad1d(dilation), - tf.keras.layers.Conv2D( - filters=channels, - kernel_size=(kernel_size, 1), - dilation_rate=(dilation, 1), - use_bias=bias, - padding="valid", - ), - getattr(tf.keras.layers, nonlinear_activation)( - **nonlinear_activation_params - ), - tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias), - ] - self.shortcut = tf.keras.layers.Conv2D( - filters=channels, kernel_size=1, use_bias=bias - ) - - @tf.function - def call(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input tensor (B, T, 1, C). - - Returns: - Tensor: Output tensor (B, T, 1, C).
- - """ - _x = tf.identity(x) - for i, layer in enumerate(self.block): - _x = layer(_x) - shortcut = self.shortcut(x) - return shortcut + _x diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/req/constructors.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/req/constructors.py deleted file mode 100644 index 25bfb391d88259d72cc90cb2e4229ab9698ebb04..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/req/constructors.py +++ /dev/null @@ -1,490 +0,0 @@ -"""Backing implementation for InstallRequirement's various constructors - -The idea here is that these formed a major chunk of InstallRequirement's size -so, moving them and support code dedicated to them outside of that class -helps creates for better understandability for the rest of the code. - -These are meant to be used elsewhere within pip to create instances of -InstallRequirement. -""" - -import logging -import os -import re -from typing import Any, Dict, Optional, Set, Tuple, Union - -from pip._vendor.packaging.markers import Marker -from pip._vendor.packaging.requirements import InvalidRequirement, Requirement -from pip._vendor.packaging.specifiers import Specifier - -from pip._internal.exceptions import InstallationError -from pip._internal.models.index import PyPI, TestPyPI -from pip._internal.models.link import Link -from pip._internal.models.wheel import Wheel -from pip._internal.req.req_file import ParsedRequirement -from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.filetypes import is_archive_file -from pip._internal.utils.misc import is_installable_dir -from pip._internal.utils.packaging import get_requirement -from pip._internal.utils.urls import path_to_url -from pip._internal.vcs import is_url, vcs - -__all__ = [ - "install_req_from_editable", - "install_req_from_line", - "parse_editable", -] - -logger = logging.getLogger(__name__) -operators = Specifier._operators.keys() - - -def _strip_extras(path: str) -> Tuple[str, Optional[str]]: - m = re.match(r"^(.+)(\[[^\]]+\])$", path) - extras = None - if m: - path_no_extras = m.group(1) - extras = m.group(2) - else: - path_no_extras = path - - return path_no_extras, extras - - -def convert_extras(extras: Optional[str]) -> Set[str]: - if not extras: - return set() - return get_requirement("placeholder" + extras.lower()).extras - - -def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]: - """Parses an editable requirement into: - - a requirement name - - an URL - - extras - - editable options - Accepted requirements: - svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir - .[some_extra] - """ - - url = editable_req - - # If a file path is specified with extras, strip off the extras. 
- url_no_extras, extras = _strip_extras(url) - - if os.path.isdir(url_no_extras): - # Treating it as code that has already been checked out - url_no_extras = path_to_url(url_no_extras) - - if url_no_extras.lower().startswith("file:"): - package_name = Link(url_no_extras).egg_fragment - if extras: - return ( - package_name, - url_no_extras, - get_requirement("placeholder" + extras.lower()).extras, - ) - else: - return package_name, url_no_extras, set() - - for version_control in vcs: - if url.lower().startswith(f"{version_control}:"): - url = f"{version_control}+{url}" - break - - link = Link(url) - - if not link.is_vcs: - backends = ", ".join(vcs.all_schemes) - raise InstallationError( - f"{editable_req} is not a valid editable requirement. " - f"It should either be a path to a local project or a VCS URL " - f"(beginning with {backends})." - ) - - package_name = link.egg_fragment - if not package_name: - raise InstallationError( - "Could not detect requirement name for '{}', please specify one " - "with #egg=your_package_name".format(editable_req) - ) - return package_name, url, set() - - -def check_first_requirement_in_file(filename: str) -> None: - """Check if file is parsable as a requirements file. - - This is heavily based on ``pkg_resources.parse_requirements``, but - simplified to just check the first meaningful line. - - :raises InvalidRequirement: If the first meaningful line cannot be parsed - as a requirement. - """ - with open(filename, encoding="utf-8", errors="ignore") as f: - # Create a steppable iterator, so we can handle \-continuations. - lines = ( - line - for line in (line.strip() for line in f) - if line and not line.startswith("#") # Skip blank lines/comments. - ) - - for line in lines: - # Drop comments -- a hash without a space may be in a URL. - if " #" in line: - line = line[: line.find(" #")] - # If there is a line continuation, drop it, and append the next line. - if line.endswith("\\"): - line = line[:-2].strip() + next(lines, "") - Requirement(line) - return - - -def deduce_helpful_msg(req: str) -> str: - """Returns a helpful message in case the requirements file does not exist, - or cannot be parsed. - - :param req: Requirements file path - """ - if not os.path.exists(req): - return f" File '{req}' does not exist." - msg = " The path does exist. " - # Try to parse and check if it is a requirements file. - try: - check_first_requirement_in_file(req) - except InvalidRequirement: - logger.debug("Cannot parse '%s' as requirements file", req) - else: - msg += ( - f"The argument you provided " - f"({req}) appears to be a" - f" requirements file. If that is the" - f" case, use the '-r' flag to install" - f" the packages specified within it." 
- ) - return msg - - -class RequirementParts: - def __init__( - self, - requirement: Optional[Requirement], - link: Optional[Link], - markers: Optional[Marker], - extras: Set[str], - ): - self.requirement = requirement - self.link = link - self.markers = markers - self.extras = extras - - -def parse_req_from_editable(editable_req: str) -> RequirementParts: - name, url, extras_override = parse_editable(editable_req) - - if name is not None: - try: - req: Optional[Requirement] = Requirement(name) - except InvalidRequirement: - raise InstallationError(f"Invalid requirement: '{name}'") - else: - req = None - - link = Link(url) - - return RequirementParts(req, link, None, extras_override) - - -# ---- The actual constructors follow ---- - - -def install_req_from_editable( - editable_req: str, - comes_from: Optional[Union[InstallRequirement, str]] = None, - use_pep517: Optional[bool] = None, - isolated: bool = False, - options: Optional[Dict[str, Any]] = None, - constraint: bool = False, - user_supplied: bool = False, - permit_editable_wheels: bool = False, -) -> InstallRequirement: - - parts = parse_req_from_editable(editable_req) - - return InstallRequirement( - parts.requirement, - comes_from=comes_from, - user_supplied=user_supplied, - editable=True, - permit_editable_wheels=permit_editable_wheels, - link=parts.link, - constraint=constraint, - use_pep517=use_pep517, - isolated=isolated, - install_options=options.get("install_options", []) if options else [], - global_options=options.get("global_options", []) if options else [], - hash_options=options.get("hashes", {}) if options else {}, - extras=parts.extras, - ) - - -def _looks_like_path(name: str) -> bool: - """Checks whether the string "looks like" a path on the filesystem. - - This does not check whether the target actually exists; it only judges from - the appearance. - - Returns true if any of the following conditions is true: - * a path separator is found (either os.path.sep or os.path.altsep); - * a dot is found (which represents the current directory). - """ - if os.path.sep in name: - return True - if os.path.altsep is not None and os.path.altsep in name: - return True - if name.startswith("."): - return True - return False - - -def _get_url_from_path(path: str, name: str) -> Optional[str]: - """ - First, it checks whether a provided path is an installable directory. If it - is, returns the path as a file URL. - - If not, it checks whether the path is an archive file (such as a .whl); - an existing archive is returned as a file URL. If the archive does not - exist and the name contains an '@' whose prefix does not look like a path, - None is returned so the name can be treated as a PEP 440 URL requirement. - """ - if _looks_like_path(name) and os.path.isdir(path): - if is_installable_dir(path): - return path_to_url(path) - # TODO: The is_installable_dir test here might not be necessary - # now that it is done in load_pyproject_toml too. - raise InstallationError( - f"Directory {name!r} is not installable. Neither 'setup.py' " - "nor 'pyproject.toml' found." - ) - if not is_archive_file(path): - return None - if os.path.isfile(path): - return path_to_url(path) - urlreq_parts = name.split("@", 1) - if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]): - # If the path contains '@' and the part before it does not look - # like a path, try to treat it as a PEP 440 URL req instead. 
- return None - logger.warning( - "Requirement %r looks like a filename, but the file does not exist", - name, - ) - return path_to_url(path) - - -def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts: - if is_url(name): - marker_sep = "; " - else: - marker_sep = ";" - if marker_sep in name: - name, markers_as_string = name.split(marker_sep, 1) - markers_as_string = markers_as_string.strip() - if not markers_as_string: - markers = None - else: - markers = Marker(markers_as_string) - else: - markers = None - name = name.strip() - req_as_string = None - path = os.path.normpath(os.path.abspath(name)) - link = None - extras_as_string = None - - if is_url(name): - link = Link(name) - else: - p, extras_as_string = _strip_extras(path) - url = _get_url_from_path(p, name) - if url is not None: - link = Link(url) - - # it's a local file, dir, or url - if link: - # Handle relative file URLs - if link.scheme == "file" and re.search(r"\.\./", link.url): - link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path)))) - # wheel file - if link.is_wheel: - wheel = Wheel(link.filename) # can raise InvalidWheelFilename - req_as_string = f"{wheel.name}=={wheel.version}" - else: - # set the req to the egg fragment. when it's not there, this - # will become an 'unnamed' requirement - req_as_string = link.egg_fragment - - # a requirement specifier - else: - req_as_string = name - - extras = convert_extras(extras_as_string) - - def with_source(text: str) -> str: - if not line_source: - return text - return f"{text} (from {line_source})" - - def _parse_req_string(req_as_string: str) -> Requirement: - try: - req = get_requirement(req_as_string) - except InvalidRequirement: - if os.path.sep in req_as_string: - add_msg = "It looks like a path." - add_msg += deduce_helpful_msg(req_as_string) - elif "=" in req_as_string and not any( - op in req_as_string for op in operators - ): - add_msg = "= is not a valid operator. Did you mean == ?" - else: - add_msg = "" - msg = with_source(f"Invalid requirement: {req_as_string!r}") - if add_msg: - msg += f"\nHint: {add_msg}" - raise InstallationError(msg) - else: - # Deprecate extras after specifiers: "name>=1.0[extras]" - # This currently works by accident because _strip_extras() parses - # any extras in the end of the string and those are saved in - # RequirementParts - for spec in req.specifier: - spec_str = str(spec) - if spec_str.endswith("]"): - msg = f"Extras after version '{spec_str}'." - raise InstallationError(msg) - return req - - if req_as_string is not None: - req: Optional[Requirement] = _parse_req_string(req_as_string) - else: - req = None - - return RequirementParts(req, link, markers, extras) - - -def install_req_from_line( - name: str, - comes_from: Optional[Union[str, InstallRequirement]] = None, - use_pep517: Optional[bool] = None, - isolated: bool = False, - options: Optional[Dict[str, Any]] = None, - constraint: bool = False, - line_source: Optional[str] = None, - user_supplied: bool = False, -) -> InstallRequirement: - """Creates an InstallRequirement from a name, which might be a - requirement, directory containing 'setup.py', filename, or URL. - - :param line_source: An optional string describing where the line is from, - for logging purposes in case of an error. 
- """ - parts = parse_req_from_line(name, line_source) - - return InstallRequirement( - parts.requirement, - comes_from, - link=parts.link, - markers=parts.markers, - use_pep517=use_pep517, - isolated=isolated, - install_options=options.get("install_options", []) if options else [], - global_options=options.get("global_options", []) if options else [], - hash_options=options.get("hashes", {}) if options else {}, - constraint=constraint, - extras=parts.extras, - user_supplied=user_supplied, - ) - - -def install_req_from_req_string( - req_string: str, - comes_from: Optional[InstallRequirement] = None, - isolated: bool = False, - use_pep517: Optional[bool] = None, - user_supplied: bool = False, -) -> InstallRequirement: - try: - req = get_requirement(req_string) - except InvalidRequirement: - raise InstallationError(f"Invalid requirement: '{req_string}'") - - domains_not_allowed = [ - PyPI.file_storage_domain, - TestPyPI.file_storage_domain, - ] - if ( - req.url - and comes_from - and comes_from.link - and comes_from.link.netloc in domains_not_allowed - ): - # Explicitly disallow pypi packages that depend on external urls - raise InstallationError( - "Packages installed from PyPI cannot depend on packages " - "which are not also hosted on PyPI.\n" - "{} depends on {} ".format(comes_from.name, req) - ) - - return InstallRequirement( - req, - comes_from, - isolated=isolated, - use_pep517=use_pep517, - user_supplied=user_supplied, - ) - - -def install_req_from_parsed_requirement( - parsed_req: ParsedRequirement, - isolated: bool = False, - use_pep517: Optional[bool] = None, - user_supplied: bool = False, -) -> InstallRequirement: - if parsed_req.is_editable: - req = install_req_from_editable( - parsed_req.requirement, - comes_from=parsed_req.comes_from, - use_pep517=use_pep517, - constraint=parsed_req.constraint, - isolated=isolated, - user_supplied=user_supplied, - ) - - else: - req = install_req_from_line( - parsed_req.requirement, - comes_from=parsed_req.comes_from, - use_pep517=use_pep517, - isolated=isolated, - options=parsed_req.options, - constraint=parsed_req.constraint, - line_source=parsed_req.line_source, - user_supplied=user_supplied, - ) - return req - - -def install_req_from_link_and_ireq( - link: Link, ireq: InstallRequirement -) -> InstallRequirement: - return InstallRequirement( - req=ireq.req, - comes_from=ireq.comes_from, - editable=ireq.editable, - link=link, - markers=ireq.markers, - use_pep517=ireq.use_pep517, - isolated=ireq.isolated, - install_options=ireq.install_options, - global_options=ireq.global_options, - hash_options=ireq.hash_options, - ) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/packaging/_musllinux.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/packaging/_musllinux.py deleted file mode 100644 index 8ac3059ba3c246b9a5a6fb8d14936bb07777191e..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/packaging/_musllinux.py +++ /dev/null @@ -1,136 +0,0 @@ -"""PEP 656 support. - -This module implements logic to detect if the currently running Python is -linked against musl, and what musl version is used. 
-""" - -import contextlib -import functools -import operator -import os -import re -import struct -import subprocess -import sys -from typing import IO, Iterator, NamedTuple, Optional, Tuple - - -def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: - return struct.unpack(fmt, f.read(struct.calcsize(fmt))) - - -def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: - """Detect musl libc location by parsing the Python executable. - - Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca - ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html - """ - f.seek(0) - try: - ident = _read_unpacked(f, "16B") - except struct.error: - return None - if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. - return None - f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. - - try: - # e_fmt: Format for program header. - # p_fmt: Format for section header. - # p_idx: Indexes to find p_type, p_offset, and p_filesz. - e_fmt, p_fmt, p_idx = { - 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. - 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. - }[ident[4]] - except KeyError: - return None - else: - p_get = operator.itemgetter(*p_idx) - - # Find the interpreter section and return its content. - try: - _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) - except struct.error: - return None - for i in range(e_phnum + 1): - f.seek(e_phoff + e_phentsize * i) - try: - p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) - except struct.error: - return None - if p_type != 3: # Not PT_INTERP. - continue - f.seek(p_offset) - interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") - if "musl" not in interpreter: - return None - return interpreter - return None - - -class _MuslVersion(NamedTuple): - major: int - minor: int - - -def _parse_musl_version(output: str) -> Optional[_MuslVersion]: - lines = [n for n in (n.strip() for n in output.splitlines()) if n] - if len(lines) < 2 or lines[0][:4] != "musl": - return None - m = re.match(r"Version (\d+)\.(\d+)", lines[1]) - if not m: - return None - return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) - - -@functools.lru_cache() -def _get_musl_version(executable: str) -> Optional[_MuslVersion]: - """Detect currently-running musl runtime version. - - This is done by checking the specified executable's dynamic linking - information, and invoking the loader to parse its output for a version - string. If the loader is musl, the output would be something like:: - - musl libc (x86_64) - Version 1.2.2 - Dynamic Program Loader - """ - with contextlib.ExitStack() as stack: - try: - f = stack.enter_context(open(executable, "rb")) - except OSError: - return None - ld = _parse_ld_musl_from_elf(f) - if not ld: - return None - proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) - return _parse_musl_version(proc.stderr) - - -def platform_tags(arch: str) -> Iterator[str]: - """Generate musllinux tags compatible to the current platform. - - :param arch: Should be the part of platform tag after the ``linux_`` - prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a - prerequisite for the current platform to be musllinux-compatible. - - :returns: An iterator of compatible musllinux tags. - """ - sys_musl = _get_musl_version(sys.executable) - if sys_musl is None: # Python not dynamically linked against musl. 
- return - for minor in range(sys_musl.minor, -1, -1): - yield f"musllinux_{sys_musl.major}_{minor}_{arch}" - - -if __name__ == "__main__": # pragma: no cover - import sysconfig - - plat = sysconfig.get_platform() - assert plat.startswith("linux-"), "not linux" - - print("plat:", plat) - print("musl:", _get_musl_version(sys.executable)) - print("tags:", end=" ") - for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): - print(t, end="\n ") diff --git a/spaces/allknowingroger/huggingface/assets/index-62ac0f80.js b/spaces/allknowingroger/huggingface/assets/index-62ac0f80.js deleted file mode 100644 index 53e7453679e79f7ca8bd811b7207fd4457fd1b00..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/huggingface/assets/index-62ac0f80.js +++ /dev/null @@ -1,41 +0,0 @@ -var Qc=Object.defineProperty;var Hc=(e,t,n)=>t in e?Qc(e,t,{enumerable:!0,configurable:!0,writable:!0,value:n}):e[t]=n;var hn=(e,t,n)=>(Hc(e,typeof t!="symbol"?t+"":t,n),n);(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))r(l);new MutationObserver(l=>{for(const o of l)if(o.type==="childList")for(const i of o.addedNodes)i.tagName==="LINK"&&i.rel==="modulepreload"&&r(i)}).observe(document,{childList:!0,subtree:!0});function n(l){const o={};return l.integrity&&(o.integrity=l.integrity),l.referrerPolicy&&(o.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?o.credentials="include":l.crossOrigin==="anonymous"?o.credentials="omit":o.credentials="same-origin",o}function r(l){if(l.ep)return;l.ep=!0;const o=n(l);fetch(l.href,o)}})();var es={exports:{}},ul={},ts={exports:{}},z={};/** - * @license React - * react.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var tr=Symbol.for("react.element"),Wc=Symbol.for("react.portal"),Kc=Symbol.for("react.fragment"),Yc=Symbol.for("react.strict_mode"),Xc=Symbol.for("react.profiler"),Gc=Symbol.for("react.provider"),qc=Symbol.for("react.context"),Zc=Symbol.for("react.forward_ref"),Jc=Symbol.for("react.suspense"),bc=Symbol.for("react.memo"),ed=Symbol.for("react.lazy"),Hi=Symbol.iterator;function td(e){return e===null||typeof e!="object"?null:(e=Hi&&e[Hi]||e["@@iterator"],typeof e=="function"?e:null)}var ns={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},rs=Object.assign,ls={};function dn(e,t,n){this.props=e,this.context=t,this.refs=ls,this.updater=n||ns}dn.prototype.isReactComponent={};dn.prototype.setState=function(e,t){if(typeof e!="object"&&typeof e!="function"&&e!=null)throw Error("setState(...): takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")};dn.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")};function os(){}os.prototype=dn.prototype;function Ko(e,t,n){this.props=e,this.context=t,this.refs=ls,this.updater=n||ns}var Yo=Ko.prototype=new os;Yo.constructor=Ko;rs(Yo,dn.prototype);Yo.isPureReactComponent=!0;var Wi=Array.isArray,is=Object.prototype.hasOwnProperty,Xo={current:null},us={key:!0,ref:!0,__self:!0,__source:!0};function ss(e,t,n){var r,l={},o=null,i=null;if(t!=null)for(r in t.ref!==void 0&&(i=t.ref),t.key!==void 0&&(o=""+t.key),t)is.call(t,r)&&!us.hasOwnProperty(r)&&(l[r]=t[r]);var u=arguments.length-2;if(u===1)l.children=n;else if(1>>1,te=j[G];if(0>>1;Gl(jl,I))ktl(ur,jl)?(j[G]=ur,j[kt]=I,G=kt):(j[G]=jl,j[xt]=I,G=xt);else if(ktl(ur,I))j[G]=ur,j[kt]=I,G=kt;else break e}}return L}function l(j,L){var I=j.sortIndex-L.sortIndex;return I!==0?I:j.id-L.id}if(typeof performance=="object"&&typeof performance.now=="function"){var o=performance;e.unstable_now=function(){return o.now()}}else{var i=Date,u=i.now();e.unstable_now=function(){return i.now()-u}}var s=[],d=[],y=1,c=null,g=3,v=!1,w=!1,k=!1,M=typeof setTimeout=="function"?setTimeout:null,m=typeof clearTimeout=="function"?clearTimeout:null,f=typeof setImmediate<"u"?setImmediate:null;typeof navigator<"u"&&navigator.scheduling!==void 0&&navigator.scheduling.isInputPending!==void 0&&navigator.scheduling.isInputPending.bind(navigator.scheduling);function h(j){for(var L=n(d);L!==null;){if(L.callback===null)r(d);else if(L.startTime<=j)r(d),L.sortIndex=L.expirationTime,t(s,L);else break;L=n(d)}}function S(j){if(k=!1,h(j),!w)if(n(s)!==null)w=!0,El(C);else{var L=n(d);L!==null&&Cl(S,L.startTime-j)}}function C(j,L){w=!1,k&&(k=!1,m(T),T=-1),v=!0;var I=g;try{for(h(L),c=n(s);c!==null&&(!(c.expirationTime>L)||j&&!Ie());){var G=c.callback;if(typeof G=="function"){c.callback=null,g=c.priorityLevel;var te=G(c.expirationTime<=L);L=e.unstable_now(),typeof te=="function"?c.callback=te:c===n(s)&&r(s),h(L)}else r(s);c=n(s)}if(c!==null)var ir=!0;else{var xt=n(d);xt!==null&&Cl(S,xt.startTime-L),ir=!1}return ir}finally{c=null,g=I,v=!1}}var _=!1,N=null,T=-1,X=5,F=-1;function Ie(){return!(e.unstable_now()-Fj||125G?(j.sortIndex=I,t(d,j),n(s)===null&&j===n(d)&&(k?(m(T),T=-1):k=!0,Cl(S,I-G))):(j.sortIndex=te,t(s,j),w||v||(w=!0,El(C))),j},e.unstable_shouldYield=Ie,e.unstable_wrapCallback=function(j){var L=g;return function(){var I=g;g=L;try{return j.apply(this,arguments)}finally{g=I}}}})(fs);ds.exports=fs;var fd=ds.exports;/** - * @license React - * 
react-dom.production.min.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */var ps=p,Ee=fd;function x(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),bl=Object.prototype.hasOwnProperty,pd=/^[:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD][:A-Z_a-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\-.0-9\u00B7\u0300-\u036F\u203F-\u2040]*$/,Yi={},Xi={};function md(e){return bl.call(Xi,e)?!0:bl.call(Yi,e)?!1:pd.test(e)?Xi[e]=!0:(Yi[e]=!0,!1)}function yd(e,t,n,r){if(n!==null&&n.type===0)return!1;switch(typeof t){case"function":case"symbol":return!0;case"boolean":return r?!1:n!==null?!n.acceptsBooleans:(e=e.toLowerCase().slice(0,5),e!=="data-"&&e!=="aria-");default:return!1}}function hd(e,t,n,r){if(t===null||typeof t>"u"||yd(e,t,n,r))return!0;if(r)return!1;if(n!==null)switch(n.type){case 3:return!t;case 4:return t===!1;case 5:return isNaN(t);case 6:return isNaN(t)||1>t}return!1}function me(e,t,n,r,l,o,i){this.acceptsBooleans=t===2||t===3||t===4,this.attributeName=r,this.attributeNamespace=l,this.mustUseProperty=n,this.propertyName=e,this.type=t,this.sanitizeURL=o,this.removeEmptyString=i}var ie={};"children dangerouslySetInnerHTML defaultValue defaultChecked innerHTML suppressContentEditableWarning suppressHydrationWarning style".split(" ").forEach(function(e){ie[e]=new me(e,0,!1,e,null,!1,!1)});[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(e){var t=e[0];ie[t]=new me(t,1,!1,e[1],null,!1,!1)});["contentEditable","draggable","spellCheck","value"].forEach(function(e){ie[e]=new me(e,2,!1,e.toLowerCase(),null,!1,!1)});["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(e){ie[e]=new me(e,2,!1,e,null,!1,!1)});"allowFullScreen async autoFocus autoPlay controls default defer disabled disablePictureInPicture disableRemotePlayback formNoValidate hidden loop noModule noValidate open playsInline readOnly required reversed scoped seamless itemScope".split(" ").forEach(function(e){ie[e]=new me(e,3,!1,e.toLowerCase(),null,!1,!1)});["checked","multiple","muted","selected"].forEach(function(e){ie[e]=new me(e,3,!0,e,null,!1,!1)});["capture","download"].forEach(function(e){ie[e]=new me(e,4,!1,e,null,!1,!1)});["cols","rows","size","span"].forEach(function(e){ie[e]=new me(e,6,!1,e,null,!1,!1)});["rowSpan","start"].forEach(function(e){ie[e]=new me(e,5,!1,e.toLowerCase(),null,!1,!1)});var qo=/[\-:]([a-z])/g;function Zo(e){return e[1].toUpperCase()}"accent-height alignment-baseline arabic-form baseline-shift cap-height clip-path clip-rule color-interpolation color-interpolation-filters color-profile color-rendering dominant-baseline enable-background fill-opacity fill-rule flood-color flood-opacity font-family font-size font-size-adjust font-stretch font-style font-variant font-weight glyph-name glyph-orientation-horizontal glyph-orientation-vertical horiz-adv-x horiz-origin-x image-rendering letter-spacing lighting-color marker-end marker-mid marker-start overline-position overline-thickness paint-order panose-1 pointer-events rendering-intent shape-rendering stop-color 
stop-opacity strikethrough-position strikethrough-thickness stroke-dasharray stroke-dashoffset stroke-linecap stroke-linejoin stroke-miterlimit stroke-opacity stroke-width text-anchor text-decoration text-rendering underline-position underline-thickness unicode-bidi unicode-range units-per-em v-alphabetic v-hanging v-ideographic v-mathematical vector-effect vert-adv-y vert-origin-x vert-origin-y word-spacing writing-mode xmlns:xlink x-height".split(" ").forEach(function(e){var t=e.replace(qo,Zo);ie[t]=new me(t,1,!1,e,null,!1,!1)});"xlink:actuate xlink:arcrole xlink:role xlink:show xlink:title xlink:type".split(" ").forEach(function(e){var t=e.replace(qo,Zo);ie[t]=new me(t,1,!1,e,"http://www.w3.org/1999/xlink",!1,!1)});["xml:base","xml:lang","xml:space"].forEach(function(e){var t=e.replace(qo,Zo);ie[t]=new me(t,1,!1,e,"http://www.w3.org/XML/1998/namespace",!1,!1)});["tabIndex","crossOrigin"].forEach(function(e){ie[e]=new me(e,1,!1,e.toLowerCase(),null,!1,!1)});ie.xlinkHref=new me("xlinkHref",1,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1);["src","href","action","formAction"].forEach(function(e){ie[e]=new me(e,1,!1,e.toLowerCase(),null,!0,!0)});function Jo(e,t,n,r){var l=ie.hasOwnProperty(t)?ie[t]:null;(l!==null?l.type!==0:r||!(2u||l[i]!==o[u]){var s=` -`+l[i].replace(" at new "," at ");return e.displayName&&s.includes("")&&(s=s.replace("",e.displayName)),s}while(1<=i&&0<=u);break}}}finally{Tl=!1,Error.prepareStackTrace=n}return(e=e?e.displayName||e.name:"")?jn(e):""}function gd(e){switch(e.tag){case 5:return jn(e.type);case 16:return jn("Lazy");case 13:return jn("Suspense");case 19:return jn("SuspenseList");case 0:case 2:case 15:return e=Ol(e.type,!1),e;case 11:return e=Ol(e.type.render,!1),e;case 1:return e=Ol(e.type,!0),e;default:return""}}function ro(e){if(e==null)return null;if(typeof e=="function")return e.displayName||e.name||null;if(typeof e=="string")return e;switch(e){case Ut:return"Fragment";case $t:return"Portal";case eo:return"Profiler";case bo:return"StrictMode";case to:return"Suspense";case no:return"SuspenseList"}if(typeof e=="object")switch(e.$$typeof){case hs:return(e.displayName||"Context")+".Consumer";case ys:return(e._context.displayName||"Context")+".Provider";case ei:var t=e.render;return e=e.displayName,e||(e=t.displayName||t.name||"",e=e!==""?"ForwardRef("+e+")":"ForwardRef"),e;case ti:return t=e.displayName||null,t!==null?t:ro(e.type)||"Memo";case nt:t=e._payload,e=e._init;try{return ro(e(t))}catch{}}return null}function vd(e){var t=e.type;switch(e.tag){case 24:return"Cache";case 9:return(t.displayName||"Context")+".Consumer";case 10:return(t._context.displayName||"Context")+".Provider";case 18:return"DehydratedFragment";case 11:return e=t.render,e=e.displayName||e.name||"",t.displayName||(e!==""?"ForwardRef("+e+")":"ForwardRef");case 7:return"Fragment";case 5:return t;case 4:return"Portal";case 3:return"Root";case 6:return"Text";case 16:return ro(t);case 8:return t===bo?"StrictMode":"Mode";case 22:return"Offscreen";case 12:return"Profiler";case 21:return"Scope";case 13:return"Suspense";case 19:return"SuspenseList";case 25:return"TracingMarker";case 1:case 0:case 17:case 2:case 14:case 15:if(typeof t=="function")return t.displayName||t.name||null;if(typeof t=="string")return t}return null}function ht(e){switch(typeof e){case"boolean":case"number":case"string":case"undefined":return e;case"object":return e;default:return""}}function vs(e){var t=e.type;return(e=e.nodeName)&&e.toLowerCase()==="input"&&(t==="checkbox"||t==="radio")}function wd(e){var 
t=vs(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&typeof n<"u"&&typeof n.get=="function"&&typeof n.set=="function"){var l=n.get,o=n.set;return Object.defineProperty(e,t,{configurable:!0,get:function(){return l.call(this)},set:function(i){r=""+i,o.call(this,i)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(i){r=""+i},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}function cr(e){e._valueTracker||(e._valueTracker=wd(e))}function ws(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=vs(e)?e.checked?"true":"false":e.value),e=r,e!==n?(t.setValue(e),!0):!1}function Mr(e){if(e=e||(typeof document<"u"?document:void 0),typeof e>"u")return null;try{return e.activeElement||e.body}catch{return e.body}}function lo(e,t){var n=t.checked;return K({},t,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:n??e._wrapperState.initialChecked})}function qi(e,t){var n=t.defaultValue==null?"":t.defaultValue,r=t.checked!=null?t.checked:t.defaultChecked;n=ht(t.value!=null?t.value:n),e._wrapperState={initialChecked:r,initialValue:n,controlled:t.type==="checkbox"||t.type==="radio"?t.checked!=null:t.value!=null}}function Ss(e,t){t=t.checked,t!=null&&Jo(e,"checked",t,!1)}function oo(e,t){Ss(e,t);var n=ht(t.value),r=t.type;if(n!=null)r==="number"?(n===0&&e.value===""||e.value!=n)&&(e.value=""+n):e.value!==""+n&&(e.value=""+n);else if(r==="submit"||r==="reset"){e.removeAttribute("value");return}t.hasOwnProperty("value")?io(e,t.type,n):t.hasOwnProperty("defaultValue")&&io(e,t.type,ht(t.defaultValue)),t.checked==null&&t.defaultChecked!=null&&(e.defaultChecked=!!t.defaultChecked)}function Zi(e,t,n){if(t.hasOwnProperty("value")||t.hasOwnProperty("defaultValue")){var r=t.type;if(!(r!=="submit"&&r!=="reset"||t.value!==void 0&&t.value!==null))return;t=""+e._wrapperState.initialValue,n||t===e.value||(e.value=t),e.defaultValue=t}n=e.name,n!==""&&(e.name=""),e.defaultChecked=!!e._wrapperState.initialChecked,n!==""&&(e.name=n)}function io(e,t,n){(t!=="number"||Mr(e.ownerDocument)!==e)&&(n==null?e.defaultValue=""+e._wrapperState.initialValue:e.defaultValue!==""+n&&(e.defaultValue=""+n))}var _n=Array.isArray;function Zt(e,t,n,r){if(e=e.options,t){t={};for(var l=0;l"+t.valueOf().toString()+"",t=dr.firstChild;e.firstChild;)e.removeChild(e.firstChild);for(;t.firstChild;)e.appendChild(t.firstChild)}});function $n(e,t){if(t){var n=e.firstChild;if(n&&n===e.lastChild&&n.nodeType===3){n.nodeValue=t;return}}e.textContent=t}var On={animationIterationCount:!0,aspectRatio:!0,borderImageOutset:!0,borderImageSlice:!0,borderImageWidth:!0,boxFlex:!0,boxFlexGroup:!0,boxOrdinalGroup:!0,columnCount:!0,columns:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,flexOrder:!0,gridArea:!0,gridRow:!0,gridRowEnd:!0,gridRowSpan:!0,gridRowStart:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnSpan:!0,gridColumnStart:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,floodOpacity:!0,stopOpacity:!0,strokeDasharray:!0,strokeDashoffset:!0,strokeMiterlimit:!0,strokeOpacity:!0,strokeWidth:!0},Sd=["Webkit","ms","Moz","O"];Object.keys(On).forEach(function(e){Sd.forEach(function(t){t=t+e.charAt(0).toUpperCase()+e.substring(1),On[t]=On[e]})});function Cs(e,t,n){return t==null||typeof t=="boolean"||t===""?"":n||typeof 
t!="number"||t===0||On.hasOwnProperty(e)&&On[e]?(""+t).trim():t+"px"}function js(e,t){e=e.style;for(var n in t)if(t.hasOwnProperty(n)){var r=n.indexOf("--")===0,l=Cs(n,t[n],r);n==="float"&&(n="cssFloat"),r?e.setProperty(n,l):e[n]=l}}var xd=K({menuitem:!0},{area:!0,base:!0,br:!0,col:!0,embed:!0,hr:!0,img:!0,input:!0,keygen:!0,link:!0,meta:!0,param:!0,source:!0,track:!0,wbr:!0});function ao(e,t){if(t){if(xd[e]&&(t.children!=null||t.dangerouslySetInnerHTML!=null))throw Error(x(137,e));if(t.dangerouslySetInnerHTML!=null){if(t.children!=null)throw Error(x(60));if(typeof t.dangerouslySetInnerHTML!="object"||!("__html"in t.dangerouslySetInnerHTML))throw Error(x(61))}if(t.style!=null&&typeof t.style!="object")throw Error(x(62))}}function co(e,t){if(e.indexOf("-")===-1)return typeof t.is=="string";switch(e){case"annotation-xml":case"color-profile":case"font-face":case"font-face-src":case"font-face-uri":case"font-face-format":case"font-face-name":case"missing-glyph":return!1;default:return!0}}var fo=null;function ni(e){return e=e.target||e.srcElement||window,e.correspondingUseElement&&(e=e.correspondingUseElement),e.nodeType===3?e.parentNode:e}var po=null,Jt=null,bt=null;function eu(e){if(e=lr(e)){if(typeof po!="function")throw Error(x(280));var t=e.stateNode;t&&(t=fl(t),po(e.stateNode,e.type,t))}}function _s(e){Jt?bt?bt.push(e):bt=[e]:Jt=e}function Ns(){if(Jt){var e=Jt,t=bt;if(bt=Jt=null,eu(e),t)for(e=0;e>>=0,e===0?32:31-(Id(e)/zd|0)|0}var fr=64,pr=4194304;function Nn(e){switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return e&4194240;case 4194304:case 8388608:case 16777216:case 33554432:case 67108864:return e&130023424;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 1073741824;default:return e}}function Vr(e,t){var n=e.pendingLanes;if(n===0)return 0;var r=0,l=e.suspendedLanes,o=e.pingedLanes,i=n&268435455;if(i!==0){var u=i&~l;u!==0?r=Nn(u):(o&=i,o!==0&&(r=Nn(o)))}else i=n&~l,i!==0?r=Nn(i):o!==0&&(r=Nn(o));if(r===0)return 0;if(t!==0&&t!==r&&!(t&l)&&(l=r&-r,o=t&-t,l>=o||l===16&&(o&4194240)!==0))return t;if(r&4&&(r|=n&16),t=e.entangledLanes,t!==0)for(e=e.entanglements,t&=r;0n;n++)t.push(e);return t}function nr(e,t,n){e.pendingLanes|=t,t!==536870912&&(e.suspendedLanes=0,e.pingedLanes=0),e=e.eventTimes,t=31-Me(t),e[t]=n}function Md(e,t){var n=e.pendingLanes&~t;e.pendingLanes=t,e.suspendedLanes=0,e.pingedLanes=0,e.expiredLanes&=t,e.mutableReadLanes&=t,e.entangledLanes&=t,t=e.entanglements;var r=e.eventTimes;for(e=e.expirationTimes;0=Ln),au=String.fromCharCode(32),cu=!1;function Ys(e,t){switch(e){case"keyup":return ff.indexOf(t.keyCode)!==-1;case"keydown":return t.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Xs(e){return e=e.detail,typeof e=="object"&&"data"in e?e.data:null}var Vt=!1;function mf(e,t){switch(e){case"compositionend":return Xs(t);case"keypress":return t.which!==32?null:(cu=!0,au);case"textInput":return e=t.data,e===au&&cu?null:e;default:return null}}function yf(e,t){if(Vt)return e==="compositionend"||!ci&&Ys(e,t)?(e=Ws(),Tr=ui=it=null,Vt=!1,e):null;switch(e){case"paste":return 
null;case"keypress":if(!(t.ctrlKey||t.altKey||t.metaKey)||t.ctrlKey&&t.altKey){if(t.char&&1=t)return{node:n,offset:t-e};e=r}e:{for(;n;){if(n.nextSibling){n=n.nextSibling;break e}n=n.parentNode}n=void 0}n=mu(n)}}function Js(e,t){return e&&t?e===t?!0:e&&e.nodeType===3?!1:t&&t.nodeType===3?Js(e,t.parentNode):"contains"in e?e.contains(t):e.compareDocumentPosition?!!(e.compareDocumentPosition(t)&16):!1:!1}function bs(){for(var e=window,t=Mr();t instanceof e.HTMLIFrameElement;){try{var n=typeof t.contentWindow.location.href=="string"}catch{n=!1}if(n)e=t.contentWindow;else break;t=Mr(e.document)}return t}function di(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&(t==="input"&&(e.type==="text"||e.type==="search"||e.type==="tel"||e.type==="url"||e.type==="password")||t==="textarea"||e.contentEditable==="true")}function Cf(e){var t=bs(),n=e.focusedElem,r=e.selectionRange;if(t!==n&&n&&n.ownerDocument&&Js(n.ownerDocument.documentElement,n)){if(r!==null&&di(n)){if(t=r.start,e=r.end,e===void 0&&(e=t),"selectionStart"in n)n.selectionStart=t,n.selectionEnd=Math.min(e,n.value.length);else if(e=(t=n.ownerDocument||document)&&t.defaultView||window,e.getSelection){e=e.getSelection();var l=n.textContent.length,o=Math.min(r.start,l);r=r.end===void 0?o:Math.min(r.end,l),!e.extend&&o>r&&(l=r,r=o,o=l),l=yu(n,o);var i=yu(n,r);l&&i&&(e.rangeCount!==1||e.anchorNode!==l.node||e.anchorOffset!==l.offset||e.focusNode!==i.node||e.focusOffset!==i.offset)&&(t=t.createRange(),t.setStart(l.node,l.offset),e.removeAllRanges(),o>r?(e.addRange(t),e.extend(i.node,i.offset)):(t.setEnd(i.node,i.offset),e.addRange(t)))}}for(t=[],e=n;e=e.parentNode;)e.nodeType===1&&t.push({element:e,left:e.scrollLeft,top:e.scrollTop});for(typeof n.focus=="function"&&n.focus(),n=0;n=document.documentMode,Bt=null,wo=null,zn=null,So=!1;function hu(e,t,n){var r=n.window===n?n.document:n.nodeType===9?n:n.ownerDocument;So||Bt==null||Bt!==Mr(r)||(r=Bt,"selectionStart"in r&&di(r)?r={start:r.selectionStart,end:r.selectionEnd}:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection(),r={anchorNode:r.anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset}),zn&&Wn(zn,r)||(zn=r,r=Hr(wo,"onSelect"),0Wt||(e.current=_o[Wt],_o[Wt]=null,Wt--)}function D(e,t){Wt++,_o[Wt]=e.current,e.current=t}var gt={},ce=wt(gt),ge=wt(!1),Pt=gt;function ln(e,t){var n=e.type.contextTypes;if(!n)return gt;var r=e.stateNode;if(r&&r.__reactInternalMemoizedUnmaskedChildContext===t)return r.__reactInternalMemoizedMaskedChildContext;var l={},o;for(o in n)l[o]=t[o];return r&&(e=e.stateNode,e.__reactInternalMemoizedUnmaskedChildContext=t,e.__reactInternalMemoizedMaskedChildContext=l),l}function ve(e){return e=e.childContextTypes,e!=null}function Kr(){V(ge),V(ce)}function Eu(e,t,n){if(ce.current!==gt)throw Error(x(168));D(ce,t),D(ge,n)}function sa(e,t,n){var r=e.stateNode;if(t=t.childContextTypes,typeof r.getChildContext!="function")return n;r=r.getChildContext();for(var l in r)if(!(l in t))throw Error(x(108,vd(e)||"Unknown",l));return K({},n,r)}function Yr(e){return e=(e=e.stateNode)&&e.__reactInternalMemoizedMergedChildContext||gt,Pt=ce.current,D(ce,e),D(ge,ge.current),!0}function Cu(e,t,n){var r=e.stateNode;if(!r)throw Error(x(169));n?(e=sa(e,t,Pt),r.__reactInternalMemoizedMergedChildContext=e,V(ge),V(ce),D(ce,e)):V(ge),D(ge,n)}var Ke=null,pl=!1,Ql=!1;function aa(e){Ke===null?Ke=[e]:Ke.push(e)}function Af(e){pl=!0,aa(e)}function St(){if(!Ql&&Ke!==null){Ql=!0;var e=0,t=A;try{var 
n=Ke;for(A=1;e>=i,l-=i,Ye=1<<32-Me(t)+l|n<T?(X=N,N=null):X=N.sibling;var F=g(m,N,h[T],S);if(F===null){N===null&&(N=X);break}e&&N&&F.alternate===null&&t(m,N),f=o(F,f,T),_===null?C=F:_.sibling=F,_=F,N=X}if(T===h.length)return n(m,N),B&&Et(m,T),C;if(N===null){for(;TT?(X=N,N=null):X=N.sibling;var Ie=g(m,N,F.value,S);if(Ie===null){N===null&&(N=X);break}e&&N&&Ie.alternate===null&&t(m,N),f=o(Ie,f,T),_===null?C=Ie:_.sibling=Ie,_=Ie,N=X}if(F.done)return n(m,N),B&&Et(m,T),C;if(N===null){for(;!F.done;T++,F=h.next())F=c(m,F.value,S),F!==null&&(f=o(F,f,T),_===null?C=F:_.sibling=F,_=F);return B&&Et(m,T),C}for(N=r(m,N);!F.done;T++,F=h.next())F=v(N,m,T,F.value,S),F!==null&&(e&&F.alternate!==null&&N.delete(F.key===null?T:F.key),f=o(F,f,T),_===null?C=F:_.sibling=F,_=F);return e&&N.forEach(function(mn){return t(m,mn)}),B&&Et(m,T),C}function M(m,f,h,S){if(typeof h=="object"&&h!==null&&h.type===Ut&&h.key===null&&(h=h.props.children),typeof h=="object"&&h!==null){switch(h.$$typeof){case ar:e:{for(var C=h.key,_=f;_!==null;){if(_.key===C){if(C=h.type,C===Ut){if(_.tag===7){n(m,_.sibling),f=l(_,h.props.children),f.return=m,m=f;break e}}else if(_.elementType===C||typeof C=="object"&&C!==null&&C.$$typeof===nt&&Lu(C)===_.type){n(m,_.sibling),f=l(_,h.props),f.ref=kn(m,_,h),f.return=m,m=f;break e}n(m,_);break}else t(m,_);_=_.sibling}h.type===Ut?(f=Ot(h.props.children,m.mode,S,h.key),f.return=m,m=f):(S=Ar(h.type,h.key,h.props,null,m.mode,S),S.ref=kn(m,f,h),S.return=m,m=S)}return i(m);case $t:e:{for(_=h.key;f!==null;){if(f.key===_)if(f.tag===4&&f.stateNode.containerInfo===h.containerInfo&&f.stateNode.implementation===h.implementation){n(m,f.sibling),f=l(f,h.children||[]),f.return=m,m=f;break e}else{n(m,f);break}else t(m,f);f=f.sibling}f=Zl(h,m.mode,S),f.return=m,m=f}return i(m);case nt:return _=h._init,M(m,f,_(h._payload),S)}if(_n(h))return w(m,f,h,S);if(gn(h))return k(m,f,h,S);Sr(m,h)}return typeof h=="string"&&h!==""||typeof h=="number"?(h=""+h,f!==null&&f.tag===6?(n(m,f.sibling),f=l(f,h),f.return=m,m=f):(n(m,f),f=ql(h,m.mode,S),f.return=m,m=f),i(m)):n(m,f)}return M}var un=ga(!0),va=ga(!1),or={},He=wt(or),Gn=wt(or),qn=wt(or);function Nt(e){if(e===or)throw Error(x(174));return e}function Si(e,t){switch(D(qn,t),D(Gn,e),D(He,or),e=t.nodeType,e){case 9:case 11:t=(t=t.documentElement)?t.namespaceURI:so(null,"");break;default:e=e===8?t.parentNode:t,t=e.namespaceURI||null,e=e.tagName,t=so(t,e)}V(He),D(He,t)}function sn(){V(He),V(Gn),V(qn)}function wa(e){Nt(qn.current);var t=Nt(He.current),n=so(t,e.type);t!==n&&(D(Gn,e),D(He,n))}function xi(e){Gn.current===e&&(V(He),V(Gn))}var H=wt(0);function br(e){for(var t=e;t!==null;){if(t.tag===13){var n=t.memoizedState;if(n!==null&&(n=n.dehydrated,n===null||n.data==="$?"||n.data==="$!"))return t}else if(t.tag===19&&t.memoizedProps.revealOrder!==void 0){if(t.flags&128)return t}else if(t.child!==null){t.child.return=t,t=t.child;continue}if(t===e)break;for(;t.sibling===null;){if(t.return===null||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}return null}var Hl=[];function ki(){for(var e=0;en?n:4,e(!0);var r=Wl.transition;Wl.transition={};try{e(!1),t()}finally{A=n,Wl.transition=r}}function Ra(){return Le().memoizedState}function Uf(e,t,n){var r=mt(e);if(n={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null},Aa(e))Ma(t,n);else if(n=pa(e,t,n,r),n!==null){var l=fe();De(n,e,r,l),Da(n,t,r)}}function Vf(e,t,n){var r=mt(e),l={lane:r,action:n,hasEagerState:!1,eagerState:null,next:null};if(Aa(e))Ma(t,l);else{var 
o=e.alternate;if(e.lanes===0&&(o===null||o.lanes===0)&&(o=t.lastRenderedReducer,o!==null))try{var i=t.lastRenderedState,u=o(i,n);if(l.hasEagerState=!0,l.eagerState=u,$e(u,i)){var s=t.interleaved;s===null?(l.next=l,vi(t)):(l.next=s.next,s.next=l),t.interleaved=l;return}}catch{}finally{}n=pa(e,t,l,r),n!==null&&(l=fe(),De(n,e,r,l),Da(n,t,r))}}function Aa(e){var t=e.alternate;return e===W||t!==null&&t===W}function Ma(e,t){Fn=el=!0;var n=e.pending;n===null?t.next=t:(t.next=n.next,n.next=t),e.pending=t}function Da(e,t,n){if(n&4194240){var r=t.lanes;r&=e.pendingLanes,n|=r,t.lanes=n,li(e,n)}}var tl={readContext:Pe,useCallback:ue,useContext:ue,useEffect:ue,useImperativeHandle:ue,useInsertionEffect:ue,useLayoutEffect:ue,useMemo:ue,useReducer:ue,useRef:ue,useState:ue,useDebugValue:ue,useDeferredValue:ue,useTransition:ue,useMutableSource:ue,useSyncExternalStore:ue,useId:ue,unstable_isNewReconciler:!1},Bf={readContext:Pe,useCallback:function(e,t){return Ve().memoizedState=[e,t===void 0?null:t],e},useContext:Pe,useEffect:zu,useImperativeHandle:function(e,t,n){return n=n!=null?n.concat([e]):null,Ir(4194308,4,Pa.bind(null,t,e),n)},useLayoutEffect:function(e,t){return Ir(4194308,4,e,t)},useInsertionEffect:function(e,t){return Ir(4,2,e,t)},useMemo:function(e,t){var n=Ve();return t=t===void 0?null:t,e=e(),n.memoizedState=[e,t],e},useReducer:function(e,t,n){var r=Ve();return t=n!==void 0?n(t):t,r.memoizedState=r.baseState=t,e={pending:null,interleaved:null,lanes:0,dispatch:null,lastRenderedReducer:e,lastRenderedState:t},r.queue=e,e=e.dispatch=Uf.bind(null,W,e),[r.memoizedState,e]},useRef:function(e){var t=Ve();return e={current:e},t.memoizedState=e},useState:Iu,useDebugValue:Ni,useDeferredValue:function(e){return Ve().memoizedState=e},useTransition:function(){var e=Iu(!1),t=e[0];return e=$f.bind(null,e[1]),Ve().memoizedState=e,[t,e]},useMutableSource:function(){},useSyncExternalStore:function(e,t,n){var r=W,l=Ve();if(B){if(n===void 0)throw Error(x(407));n=n()}else{if(n=t(),re===null)throw Error(x(349));It&30||ka(r,t,n)}l.memoizedState=n;var o={value:n,getSnapshot:t};return l.queue=o,zu(Ca.bind(null,r,o,e),[e]),r.flags|=2048,bn(9,Ea.bind(null,r,o,n,t),void 0,null),n},useId:function(){var e=Ve(),t=re.identifierPrefix;if(B){var n=Xe,r=Ye;n=(r&~(1<<32-Me(r)-1)).toString(32)+n,t=":"+t+"R"+n,n=Zn++,0<\/script>",e=e.removeChild(e.firstChild)):typeof r.is=="string"?e=i.createElement(n,{is:r.is}):(e=i.createElement(n),n==="select"&&(i=e,r.multiple?i.multiple=!0:r.size&&(i.size=r.size))):e=i.createElementNS(e,n),e[Be]=t,e[Xn]=r,Ya(e,t,!1,!1),t.stateNode=e;e:{switch(i=co(n,r),n){case"dialog":U("cancel",e),U("close",e),l=r;break;case"iframe":case"object":case"embed":U("load",e),l=r;break;case"video":case"audio":for(l=0;lcn&&(t.flags|=128,r=!0,En(o,!1),t.lanes=4194304)}else{if(!r)if(e=br(i),e!==null){if(t.flags|=128,r=!0,n=e.updateQueue,n!==null&&(t.updateQueue=n,t.flags|=4),En(o,!0),o.tail===null&&o.tailMode==="hidden"&&!i.alternate&&!B)return se(t),null}else 2*q()-o.renderingStartTime>cn&&n!==1073741824&&(t.flags|=128,r=!0,En(o,!1),t.lanes=4194304);o.isBackwards?(i.sibling=t.child,t.child=i):(n=o.last,n!==null?n.sibling=i:t.child=i,o.last=i)}return o.tail!==null?(t=o.tail,o.rendering=t,o.tail=t.sibling,o.renderingStartTime=q(),t.sibling=null,n=H.current,D(H,r?n&1|2:n&1),t):(se(t),null);case 22:case 23:return zi(),r=t.memoizedState!==null,e!==null&&e.memoizedState!==null!==r&&(t.flags|=8192),r&&t.mode&1?Se&1073741824&&(se(t),t.subtreeFlags&6&&(t.flags|=8192)):se(t),null;case 24:return null;case 25:return null}throw 
Error(x(156,t.tag))}function qf(e,t){switch(pi(t),t.tag){case 1:return ve(t.type)&&Kr(),e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 3:return sn(),V(ge),V(ce),ki(),e=t.flags,e&65536&&!(e&128)?(t.flags=e&-65537|128,t):null;case 5:return xi(t),null;case 13:if(V(H),e=t.memoizedState,e!==null&&e.dehydrated!==null){if(t.alternate===null)throw Error(x(340));on()}return e=t.flags,e&65536?(t.flags=e&-65537|128,t):null;case 19:return V(H),null;case 4:return sn(),null;case 10:return gi(t.type._context),null;case 22:case 23:return zi(),null;case 24:return null;default:return null}}var kr=!1,ae=!1,Zf=typeof WeakSet=="function"?WeakSet:Set,E=null;function Gt(e,t){var n=e.ref;if(n!==null)if(typeof n=="function")try{n(null)}catch(r){Y(e,t,r)}else n.current=null}function Do(e,t,n){try{n()}catch(r){Y(e,t,r)}}var Bu=!1;function Jf(e,t){if(xo=Br,e=bs(),di(e)){if("selectionStart"in e)var n={start:e.selectionStart,end:e.selectionEnd};else e:{n=(n=e.ownerDocument)&&n.defaultView||window;var r=n.getSelection&&n.getSelection();if(r&&r.rangeCount!==0){n=r.anchorNode;var l=r.anchorOffset,o=r.focusNode;r=r.focusOffset;try{n.nodeType,o.nodeType}catch{n=null;break e}var i=0,u=-1,s=-1,d=0,y=0,c=e,g=null;t:for(;;){for(var v;c!==n||l!==0&&c.nodeType!==3||(u=i+l),c!==o||r!==0&&c.nodeType!==3||(s=i+r),c.nodeType===3&&(i+=c.nodeValue.length),(v=c.firstChild)!==null;)g=c,c=v;for(;;){if(c===e)break t;if(g===n&&++d===l&&(u=i),g===o&&++y===r&&(s=i),(v=c.nextSibling)!==null)break;c=g,g=c.parentNode}c=v}n=u===-1||s===-1?null:{start:u,end:s}}else n=null}n=n||{start:0,end:0}}else n=null;for(ko={focusedElem:e,selectionRange:n},Br=!1,E=t;E!==null;)if(t=E,e=t.child,(t.subtreeFlags&1028)!==0&&e!==null)e.return=t,E=e;else for(;E!==null;){t=E;try{var w=t.alternate;if(t.flags&1024)switch(t.tag){case 0:case 11:case 15:break;case 1:if(w!==null){var k=w.memoizedProps,M=w.memoizedState,m=t.stateNode,f=m.getSnapshotBeforeUpdate(t.elementType===t.type?k:Fe(t.type,k),M);m.__reactInternalSnapshotBeforeUpdate=f}break;case 3:var h=t.stateNode.containerInfo;h.nodeType===1?h.textContent="":h.nodeType===9&&h.documentElement&&h.removeChild(h.documentElement);break;case 5:case 6:case 4:case 17:break;default:throw Error(x(163))}}catch(S){Y(t,t.return,S)}if(e=t.sibling,e!==null){e.return=t.return,E=e;break}E=t.return}return w=Bu,Bu=!1,w}function Rn(e,t,n){var r=t.updateQueue;if(r=r!==null?r.lastEffect:null,r!==null){var l=r=r.next;do{if((l.tag&e)===e){var o=l.destroy;l.destroy=void 0,o!==void 0&&Do(t,n,o)}l=l.next}while(l!==r)}}function hl(e,t){if(t=t.updateQueue,t=t!==null?t.lastEffect:null,t!==null){var n=t=t.next;do{if((n.tag&e)===e){var r=n.create;n.destroy=r()}n=n.next}while(n!==t)}}function $o(e){var t=e.ref;if(t!==null){var n=e.stateNode;switch(e.tag){case 5:e=n;break;default:e=n}typeof t=="function"?t(e):t.current=e}}function qa(e){var t=e.alternate;t!==null&&(e.alternate=null,qa(t)),e.child=null,e.deletions=null,e.sibling=null,e.tag===5&&(t=e.stateNode,t!==null&&(delete t[Be],delete t[Xn],delete t[jo],delete t[Ff],delete t[Rf])),e.stateNode=null,e.return=null,e.dependencies=null,e.memoizedProps=null,e.memoizedState=null,e.pendingProps=null,e.stateNode=null,e.updateQueue=null}function Za(e){return e.tag===5||e.tag===3||e.tag===4}function Qu(e){e:for(;;){for(;e.sibling===null;){if(e.return===null||Za(e.return))return null;e=e.return}for(e.sibling.return=e.return,e=e.sibling;e.tag!==5&&e.tag!==6&&e.tag!==18;){if(e.flags&2||e.child===null||e.tag===4)continue e;e.child.return=e,e=e.child}if(!(e.flags&2))return e.stateNode}}function 
Uo(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.nodeType===8?n.parentNode.insertBefore(e,t):n.insertBefore(e,t):(n.nodeType===8?(t=n.parentNode,t.insertBefore(e,n)):(t=n,t.appendChild(e)),n=n._reactRootContainer,n!=null||t.onclick!==null||(t.onclick=Wr));else if(r!==4&&(e=e.child,e!==null))for(Uo(e,t,n),e=e.sibling;e!==null;)Uo(e,t,n),e=e.sibling}function Vo(e,t,n){var r=e.tag;if(r===5||r===6)e=e.stateNode,t?n.insertBefore(e,t):n.appendChild(e);else if(r!==4&&(e=e.child,e!==null))for(Vo(e,t,n),e=e.sibling;e!==null;)Vo(e,t,n),e=e.sibling}var le=null,Re=!1;function tt(e,t,n){for(n=n.child;n!==null;)Ja(e,t,n),n=n.sibling}function Ja(e,t,n){if(Qe&&typeof Qe.onCommitFiberUnmount=="function")try{Qe.onCommitFiberUnmount(sl,n)}catch{}switch(n.tag){case 5:ae||Gt(n,t);case 6:var r=le,l=Re;le=null,tt(e,t,n),le=r,Re=l,le!==null&&(Re?(e=le,n=n.stateNode,e.nodeType===8?e.parentNode.removeChild(n):e.removeChild(n)):le.removeChild(n.stateNode));break;case 18:le!==null&&(Re?(e=le,n=n.stateNode,e.nodeType===8?Bl(e.parentNode,n):e.nodeType===1&&Bl(e,n),Qn(e)):Bl(le,n.stateNode));break;case 4:r=le,l=Re,le=n.stateNode.containerInfo,Re=!0,tt(e,t,n),le=r,Re=l;break;case 0:case 11:case 14:case 15:if(!ae&&(r=n.updateQueue,r!==null&&(r=r.lastEffect,r!==null))){l=r=r.next;do{var o=l,i=o.destroy;o=o.tag,i!==void 0&&(o&2||o&4)&&Do(n,t,i),l=l.next}while(l!==r)}tt(e,t,n);break;case 1:if(!ae&&(Gt(n,t),r=n.stateNode,typeof r.componentWillUnmount=="function"))try{r.props=n.memoizedProps,r.state=n.memoizedState,r.componentWillUnmount()}catch(u){Y(n,t,u)}tt(e,t,n);break;case 21:tt(e,t,n);break;case 22:n.mode&1?(ae=(r=ae)||n.memoizedState!==null,tt(e,t,n),ae=r):tt(e,t,n);break;default:tt(e,t,n)}}function Hu(e){var t=e.updateQueue;if(t!==null){e.updateQueue=null;var n=e.stateNode;n===null&&(n=e.stateNode=new Zf),t.forEach(function(r){var l=up.bind(null,e,r);n.has(r)||(n.add(r),r.then(l,l))})}}function ze(e,t){var n=t.deletions;if(n!==null)for(var r=0;rl&&(l=i),r&=~o}if(r=l,r=q()-r,r=(120>r?120:480>r?480:1080>r?1080:1920>r?1920:3e3>r?3e3:4320>r?4320:1960*ep(r/1960))-r,10e?16:e,ut===null)var r=!1;else{if(e=ut,ut=null,ll=0,R&6)throw Error(x(331));var l=R;for(R|=4,E=e.current;E!==null;){var o=E,i=o.child;if(E.flags&16){var u=o.deletions;if(u!==null){for(var s=0;sq()-Li?Tt(e,0):Pi|=n),we(e,t)}function ic(e,t){t===0&&(e.mode&1?(t=pr,pr<<=1,!(pr&130023424)&&(pr=4194304)):t=1);var n=fe();e=Je(e,t),e!==null&&(nr(e,t,n),we(e,n))}function ip(e){var t=e.memoizedState,n=0;t!==null&&(n=t.retryLane),ic(e,n)}function up(e,t){var n=0;switch(e.tag){case 13:var r=e.stateNode,l=e.memoizedState;l!==null&&(n=l.retryLane);break;case 19:r=e.stateNode;break;default:throw Error(x(314))}r!==null&&r.delete(t),ic(e,n)}var uc;uc=function(e,t,n){if(e!==null)if(e.memoizedProps!==t.pendingProps||ge.current)he=!0;else{if(!(e.lanes&n)&&!(t.flags&128))return he=!1,Xf(e,t,n);he=!!(e.flags&131072)}else he=!1,B&&t.flags&1048576&&ca(t,Gr,t.index);switch(t.lanes=0,t.tag){case 2:var r=t.type;zr(e,t),e=t.pendingProps;var l=ln(t,ce.current);tn(t,n),l=Ci(null,t,r,e,l,n);var o=ji();return t.flags|=1,typeof l=="object"&&l!==null&&typeof l.render=="function"&&l.$$typeof===void 0?(t.tag=1,t.memoizedState=null,t.updateQueue=null,ve(r)?(o=!0,Yr(t)):o=!1,t.memoizedState=l.state!==null&&l.state!==void 0?l.state:null,wi(t),l.updater=ml,t.stateNode=l,l._reactInternals=t,Lo(t,r,e,n),t=Fo(null,t,r,!0,o,n)):(t.tag=0,B&&o&&fi(t),de(null,t,l,n),t=t.child),t;case 
16:r=t.elementType;e:{switch(zr(e,t),e=t.pendingProps,l=r._init,r=l(r._payload),t.type=r,l=t.tag=ap(r),e=Fe(r,e),l){case 0:t=zo(null,t,r,e,n);break e;case 1:t=$u(null,t,r,e,n);break e;case 11:t=Mu(null,t,r,e,n);break e;case 14:t=Du(null,t,r,Fe(r.type,e),n);break e}throw Error(x(306,r,""))}return t;case 0:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Fe(r,l),zo(e,t,r,l,n);case 1:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Fe(r,l),$u(e,t,r,l,n);case 3:e:{if(Ha(t),e===null)throw Error(x(387));r=t.pendingProps,o=t.memoizedState,l=o.element,ma(e,t),Jr(t,r,null,n);var i=t.memoizedState;if(r=i.element,o.isDehydrated)if(o={element:r,isDehydrated:!1,cache:i.cache,pendingSuspenseBoundaries:i.pendingSuspenseBoundaries,transitions:i.transitions},t.updateQueue.baseState=o,t.memoizedState=o,t.flags&256){l=an(Error(x(423)),t),t=Uu(e,t,r,n,l);break e}else if(r!==l){l=an(Error(x(424)),t),t=Uu(e,t,r,n,l);break e}else for(xe=dt(t.stateNode.containerInfo.firstChild),ke=t,B=!0,Ae=null,n=va(t,null,r,n),t.child=n;n;)n.flags=n.flags&-3|4096,n=n.sibling;else{if(on(),r===l){t=be(e,t,n);break e}de(e,t,r,n)}t=t.child}return t;case 5:return wa(t),e===null&&To(t),r=t.type,l=t.pendingProps,o=e!==null?e.memoizedProps:null,i=l.children,Eo(r,l)?i=null:o!==null&&Eo(r,o)&&(t.flags|=32),Qa(e,t),de(e,t,i,n),t.child;case 6:return e===null&&To(t),null;case 13:return Wa(e,t,n);case 4:return Si(t,t.stateNode.containerInfo),r=t.pendingProps,e===null?t.child=un(t,null,r,n):de(e,t,r,n),t.child;case 11:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Fe(r,l),Mu(e,t,r,l,n);case 7:return de(e,t,t.pendingProps,n),t.child;case 8:return de(e,t,t.pendingProps.children,n),t.child;case 12:return de(e,t,t.pendingProps.children,n),t.child;case 10:e:{if(r=t.type._context,l=t.pendingProps,o=t.memoizedProps,i=l.value,D(qr,r._currentValue),r._currentValue=i,o!==null)if($e(o.value,i)){if(o.children===l.children&&!ge.current){t=be(e,t,n);break e}}else for(o=t.child,o!==null&&(o.return=t);o!==null;){var u=o.dependencies;if(u!==null){i=o.child;for(var s=u.firstContext;s!==null;){if(s.context===r){if(o.tag===1){s=Ge(-1,n&-n),s.tag=2;var d=o.updateQueue;if(d!==null){d=d.shared;var y=d.pending;y===null?s.next=s:(s.next=y.next,y.next=s),d.pending=s}}o.lanes|=n,s=o.alternate,s!==null&&(s.lanes|=n),Oo(o.return,n,t),u.lanes|=n;break}s=s.next}}else if(o.tag===10)i=o.type===t.type?null:o.child;else if(o.tag===18){if(i=o.return,i===null)throw Error(x(341));i.lanes|=n,u=i.alternate,u!==null&&(u.lanes|=n),Oo(i,n,t),i=o.sibling}else i=o.child;if(i!==null)i.return=o;else for(i=o;i!==null;){if(i===t){i=null;break}if(o=i.sibling,o!==null){o.return=i.return,i=o;break}i=i.return}o=i}de(e,t,l.children,n),t=t.child}return t;case 9:return l=t.type,r=t.pendingProps.children,tn(t,n),l=Pe(l),r=r(l),t.flags|=1,de(e,t,r,n),t.child;case 14:return r=t.type,l=Fe(r,t.pendingProps),l=Fe(r.type,l),Du(e,t,r,l,n);case 15:return Va(e,t,t.type,t.pendingProps,n);case 17:return r=t.type,l=t.pendingProps,l=t.elementType===r?l:Fe(r,l),zr(e,t),t.tag=1,ve(r)?(e=!0,Yr(t)):e=!1,tn(t,n),ha(t,r,l),Lo(t,r,l,n),Fo(null,t,r,!0,e,n);case 19:return Ka(e,t,n);case 22:return Ba(e,t,n)}throw Error(x(156,t.tag))};function sc(e,t){return Fs(e,t)}function 
sp(e,t,n,r){this.tag=e,this.key=n,this.sibling=this.child=this.return=this.stateNode=this.type=this.elementType=null,this.index=0,this.ref=null,this.pendingProps=t,this.dependencies=this.memoizedState=this.updateQueue=this.memoizedProps=null,this.mode=r,this.subtreeFlags=this.flags=0,this.deletions=null,this.childLanes=this.lanes=0,this.alternate=null}function Te(e,t,n,r){return new sp(e,t,n,r)}function Ri(e){return e=e.prototype,!(!e||!e.isReactComponent)}function ap(e){if(typeof e=="function")return Ri(e)?1:0;if(e!=null){if(e=e.$$typeof,e===ei)return 11;if(e===ti)return 14}return 2}function yt(e,t){var n=e.alternate;return n===null?(n=Te(e.tag,t,e.key,e.mode),n.elementType=e.elementType,n.type=e.type,n.stateNode=e.stateNode,n.alternate=e,e.alternate=n):(n.pendingProps=t,n.type=e.type,n.flags=0,n.subtreeFlags=0,n.deletions=null),n.flags=e.flags&14680064,n.childLanes=e.childLanes,n.lanes=e.lanes,n.child=e.child,n.memoizedProps=e.memoizedProps,n.memoizedState=e.memoizedState,n.updateQueue=e.updateQueue,t=e.dependencies,n.dependencies=t===null?null:{lanes:t.lanes,firstContext:t.firstContext},n.sibling=e.sibling,n.index=e.index,n.ref=e.ref,n}function Ar(e,t,n,r,l,o){var i=2;if(r=e,typeof e=="function")Ri(e)&&(i=1);else if(typeof e=="string")i=5;else e:switch(e){case Ut:return Ot(n.children,l,o,t);case bo:i=8,l|=8;break;case eo:return e=Te(12,n,t,l|2),e.elementType=eo,e.lanes=o,e;case to:return e=Te(13,n,t,l),e.elementType=to,e.lanes=o,e;case no:return e=Te(19,n,t,l),e.elementType=no,e.lanes=o,e;case gs:return vl(n,l,o,t);default:if(typeof e=="object"&&e!==null)switch(e.$$typeof){case ys:i=10;break e;case hs:i=9;break e;case ei:i=11;break e;case ti:i=14;break e;case nt:i=16,r=null;break e}throw Error(x(130,e==null?e:typeof e,""))}return t=Te(i,n,t,l),t.elementType=e,t.type=r,t.lanes=o,t}function Ot(e,t,n,r){return e=Te(7,e,r,t),e.lanes=n,e}function vl(e,t,n,r){return e=Te(22,e,r,t),e.elementType=gs,e.lanes=n,e.stateNode={isHidden:!1},e}function ql(e,t,n){return e=Te(6,e,null,t),e.lanes=n,e}function Zl(e,t,n){return t=Te(4,e.children!==null?e.children:[],e.key,t),t.lanes=n,t.stateNode={containerInfo:e.containerInfo,pendingChildren:null,implementation:e.implementation},t}function cp(e,t,n,r,l){this.tag=t,this.containerInfo=e,this.finishedWork=this.pingCache=this.current=this.pendingChildren=null,this.timeoutHandle=-1,this.callbackNode=this.pendingContext=this.context=null,this.callbackPriority=0,this.eventTimes=Ll(0),this.expirationTimes=Ll(-1),this.entangledLanes=this.finishedLanes=this.mutableReadLanes=this.expiredLanes=this.pingedLanes=this.suspendedLanes=this.pendingLanes=0,this.entanglements=Ll(0),this.identifierPrefix=r,this.onRecoverableError=l,this.mutableSourceEagerHydrationData=null}function Ai(e,t,n,r,l,o,i,u,s){return e=new cp(e,t,n,u,s),t===1?(t=1,o===!0&&(t|=8)):t=0,o=Te(3,null,null,t),e.current=o,o.stateNode=e,o.memoizedState={element:r,isDehydrated:n,cache:null,transitions:null,pendingSuspenseBoundaries:null},wi(o),e}function dp(e,t,n){var r=3"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(fc)}catch(e){console.error(e)}}fc(),cs.exports=Ce;var hp=cs.exports,pc,Ju=hp;pc=Ju.createRoot,Ju.hydrateRoot;var gp=(typeof process<"u","https://huggingface.co");async function vp(e,t){var r;const n=new wp(e.url,e.status,e.headers.get("X-Request-Id")??(t==null?void 0:t.requestId));if(n.message=`Api error with status ${n.statusCode}.${t!=null&&t.message?` ${t.message}.`:""} Request ID: ${n.requestId}, url: 
${n.url}`,(r=e.headers.get("Content-Type"))!=null&&r.startsWith("application/json")){const l=await e.json();n.message=l.error||l.message||n.message,n.data=l}else n.data={message:await e.text()};throw n}var wp=class extends Error{constructor(t,n,r,l){super(l);hn(this,"statusCode");hn(this,"url");hn(this,"requestId");hn(this,"data");this.statusCode=n,this.requestId=r,this.url=t}};function Sp(e){if(!(!e||e.accessToken===void 0||e.accessToken===null)&&!e.accessToken.startsWith("hf_"))throw new TypeError("Your access token must start with 'hf_'")}function xp(e){const t=/<(https?:[/][/][^>]+)>;\s+rel="([^"]+)"/g;return Object.fromEntries([...e.matchAll(t)].map(([,n,r])=>[r,n]))}var kp=["pipeline_tag","private","gated","downloads","likes"];async function*Ep(e){var r,l;Sp(e==null?void 0:e.credentials);const t=new URLSearchParams([...Object.entries({limit:"500",...(r=e==null?void 0:e.search)!=null&&r.owner?{author:e.search.owner}:void 0,...(l=e==null?void 0:e.search)!=null&&l.task?{pipeline_tag:e.search.task}:void 0}),...kp.map(o=>["expand",o])]).toString();let n=`${(e==null?void 0:e.hubUrl)||gp}/api/models?${t}`;for(;n;){const o=await fetch(n,{headers:{accept:"application/json",...e!=null&&e.credentials?{Authorization:`Bearer ${e.credentials.accessToken}`}:void 0}});if(!o.ok)throw vp(o);const i=await o.json();for(const s of i)yield{id:s._id,name:s.id,private:s.private,task:s.pipeline_tag,downloads:s.downloads,gated:s.gated,likes:s.likes,updatedAt:new Date(s.lastModified)};const u=o.headers.get("Link");n=u?xp(u).next:void 0}}var Cp=Object.defineProperty,jp=(e,t)=>{for(var n in t)Cp(e,n,{get:t[n],enumerable:!0})},_p={};jp(_p,{audioClassification:()=>yc,automaticSpeechRecognition:()=>hc,conversational:()=>Cc,documentQuestionAnswering:()=>Ac,featureExtraction:()=>jc,fillMask:()=>_c,imageClassification:()=>vc,imageSegmentation:()=>wc,imageToImage:()=>Ec,imageToText:()=>Sc,objectDetection:()=>xc,questionAnswering:()=>Nc,request:()=>$,sentenceSimilarity:()=>Tc,streamingRequest:()=>Ui,summarization:()=>Oc,tableQuestionAnswering:()=>Pc,tabularRegression:()=>Dc,textClassification:()=>Lc,textGeneration:()=>Ic,textGenerationStream:()=>Lp,textToImage:()=>kc,textToSpeech:()=>gc,tokenClassification:()=>zc,translation:()=>Fc,visualQuestionAnswering:()=>Mc,zeroShotClassification:()=>Rc});var Np="https://api-inference.huggingface.co/models/";function mc(e,t){const{model:n,accessToken:r,...l}=e,o={};r&&(o.Authorization=`Bearer ${r}`);const i="data"in e&&!!e.data;i?(t!=null&&t.wait_for_model&&(o["X-Wait-For-Model"]="true"),(t==null?void 0:t.use_cache)===!1&&(o["X-Use-Cache"]="false"),t!=null&&t.dont_load_model&&(o["X-Load-Model"]="0")):o["Content-Type"]="application/json";const u=/^http(s?):/.test(n)||n.startsWith("/")?n:`${Np}${n}`,s={headers:o,method:"POST",body:i?e.data:JSON.stringify({...l,options:t}),credentials:t!=null&&t.includeCredentials?"include":"same-origin"};return{url:u,info:s}}async function $(e,t){var o,i;const{url:n,info:r}=mc(e,t),l=await((t==null?void 0:t.fetch)??fetch)(n,r);if((t==null?void 0:t.retry_on_error)!==!1&&l.status===503&&!(t!=null&&t.wait_for_model))return $(e,{...t,wait_for_model:!0});if(!l.ok){if((o=l.headers.get("Content-Type"))!=null&&o.startsWith("application/json")){const u=await l.json();if(u.error)throw new Error(u.error)}throw new Error("An error occurred while fetching the blob")}return(i=l.headers.get("Content-Type"))!=null&&i.startsWith("application/json")?await l.json():await l.blob()}function Tp(e){let t,n,r,l=!1;return function(i){t===void 0?(t=i,n=0,r=-1):t=Pp(t,i);const 
u=t.length;let s=0;for(;n0){const s=l.decode(i.subarray(0,u)),d=u+(i[u+1]===32?2:1),y=l.decode(i.subarray(d));switch(s){case"data":r.data=r.data?r.data+` -`+y:y;break;case"event":r.event=y;break;case"id":e(r.id=y);break;case"retry":const c=parseInt(y,10);isNaN(c)||t(r.retry=c);break}}}}function Pp(e,t){const n=new Uint8Array(e.length+t.length);return n.set(e),n.set(t,e.length),n}function bu(){return{data:"",event:"",id:"",retry:void 0}}async function*Ui(e,t){var d;const{url:n,info:r}=mc({...e,stream:!0},t),l=await((t==null?void 0:t.fetch)??fetch)(n,r);if((t==null?void 0:t.retry_on_error)!==!1&&l.status===503&&!(t!=null&&t.wait_for_model))return Ui(e,{...t,wait_for_model:!0});if(!l.ok){if((d=l.headers.get("Content-Type"))!=null&&d.startsWith("application/json")){const y=await l.json();if(y.error)throw new Error(y.error)}throw new Error(`Server response contains error: ${l.status}`)}if(l.headers.get("content-type")!=="text/event-stream")throw new Error("Server does not support event stream content type, it returned "+l.headers.get("content-type"));if(!l.body)return;const o=l.body.getReader();let i=[];const s=Tp(Op(()=>{},()=>{},y=>{i.push(y)}));try{for(;;){const{done:y,value:c}=await o.read();if(y)return;s(c);for(const g of i)if(g.data.length>0){const v=JSON.parse(g.data);if(typeof v=="object"&&v!==null&&"error"in v)throw new Error(v.error);yield v}i=[]}}finally{o.releaseLock()}}var Q=class extends TypeError{constructor(e){super(`Invalid inference output: ${e}. Use the 'request' method with the same parameters to do a custom call with no type checking.`),this.name="InferenceOutputError"}};async function yc(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number")))throw new Q("Expected Array<{label: string, score: number}>");return n}async function hc(e,t){const n=await $(e,t);if(!(typeof(n==null?void 0:n.text)=="string"))throw new Q("Expected {text: string}");return n}async function gc(e,t){const n=await $(e,t);if(!(n&&n instanceof Blob))throw new Q("Expected Blob");return n}async function vc(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number")))throw new Q("Expected Array<{label: string, score: number}>");return n}async function wc(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.mask=="string"&&typeof l.score=="number")))throw new Q("Expected Array<{label: string, mask: string, score: number}>");return n}async function Sc(e,t){var r;const n=(r=await $(e,t))==null?void 0:r[0];if(typeof(n==null?void 0:n.generated_text)!="string")throw new Q("Expected {generated_text: string}");return n}async function xc(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.label=="string"&&typeof l.score=="number"&&typeof l.box.xmin=="number"&&typeof l.box.ymin=="number"&&typeof l.box.xmax=="number"&&typeof l.box.ymax=="number")))throw new Q("Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>");return n}async function kc(e,t){const n=await $(e,t);if(!(n&&n instanceof Blob))throw new Q("Expected Blob");return n}function Vi(e){if(globalThis.Buffer)return globalThis.Buffer.from(e).toString("base64");{const t=[];return e.forEach(n=>{t.push(String.fromCharCode(n))}),globalThis.btoa(t.join(""))}}async function Ec(e,t){let n;e.parameters?n={...e,inputs:Vi(new Uint8Array(e.inputs instanceof ArrayBuffer?e.inputs:await 
e.inputs.arrayBuffer()))}:n={accessToken:e.accessToken,model:e.model,data:e.inputs};const r=await $(n,t);if(!(r&&r instanceof Blob))throw new Q("Expected Blob");return r}async function Cc(e,t){const n=await $(e,t);if(!(Array.isArray(n.conversation.generated_responses)&&n.conversation.generated_responses.every(l=>typeof l=="string")&&Array.isArray(n.conversation.past_user_inputs)&&n.conversation.past_user_inputs.every(l=>typeof l=="string")&&typeof n.generated_text=="string"&&Array.isArray(n.warnings)&&n.warnings.every(l=>typeof l=="string")))throw new Q("Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}");return n}async function jc(e,t){const n=await $(e,t);let r=!0;if(Array.isArray(n)){for(const l of n)if(Array.isArray(l)){if(r=l.every(o=>typeof o=="number"),!r)break}else if(typeof l!="number"){r=!1;break}}else r=!1;if(!r)throw new Q("Expected Array");return n}async function _c(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l.score=="number"&&typeof l.sequence=="string"&&typeof l.token=="number"&&typeof l.token_str=="string")))throw new Q("Expected Array<{score: number, sequence: string, token: number, token_str: string}>");return n}async function Nc(e,t){const n=await $(e,t);if(!(typeof n=="object"&&!!n&&typeof n.answer=="string"&&typeof n.end=="number"&&typeof n.score=="number"&&typeof n.start=="number"))throw new Q("Expected {answer: string, end: number, score: number, start: number}");return n}async function Tc(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l=="number")))throw new Q("Expected number[]");return n}async function Oc(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.summary_text)=="string")))throw new Q("Expected Array<{summary_text: string}>");return n==null?void 0:n[0]}async function Pc(e,t){const n=await $(e,t);if(!(typeof(n==null?void 0:n.aggregator)=="string"&&typeof n.answer=="string"&&Array.isArray(n.cells)&&n.cells.every(l=>typeof l=="string")&&Array.isArray(n.coordinates)&&n.coordinates.every(l=>Array.isArray(l)&&l.every(o=>typeof o=="number"))))throw new Q("Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}");return n}async function Lc(e,t){var l;const n=(l=await $(e,t))==null?void 0:l[0];if(!(Array.isArray(n)&&n.every(o=>typeof(o==null?void 0:o.label)=="string"&&typeof o.score=="number")))throw new Q("Expected Array<{label: string, score: number}>");return n}async function Ic(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.generated_text)=="string")))throw new Q("Expected Array<{generated_text: string}>");return n==null?void 0:n[0]}async function*Lp(e,t){yield*Ui(e,t)}function Bi(e){return Array.isArray(e)?e:[e]}async function zc(e,t){const n=Bi(await $(e,t));if(!(Array.isArray(n)&&n.every(l=>typeof l.end=="number"&&typeof l.entity_group=="string"&&typeof l.score=="number"&&typeof l.start=="number"&&typeof l.word=="string")))throw new Q("Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>");return n}async function Fc(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof(l==null?void 0:l.translation_text)=="string")))throw new Q("Expected type Array<{translation_text: string}>");return n==null?void 0:n[0]}async function Rc(e,t){const n=Bi(await $(e,t));if(!(Array.isArray(n)&&n.every(l=>Array.isArray(l.labels)&&l.labels.every(o=>typeof 
o=="string")&&Array.isArray(l.scores)&&l.scores.every(o=>typeof o=="number")&&typeof l.sequence=="string")))throw new Q("Expected Array<{labels: string[], scores: number[], sequence: string}>");return n}async function Ac(e,t){var o;const n={...e,inputs:{question:e.inputs.question,image:Vi(new Uint8Array(e.inputs.image instanceof ArrayBuffer?e.inputs.image:await e.inputs.image.arrayBuffer()))}},r=(o=Bi(await $(n,t)))==null?void 0:o[0];if(!(typeof(r==null?void 0:r.answer)=="string"&&(typeof r.end=="number"||typeof r.end>"u")&&(typeof r.score=="number"||typeof r.score>"u")&&(typeof r.start=="number"||typeof r.start>"u")))throw new Q("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");return r}async function Mc(e,t){var o;const n={...e,inputs:{question:e.inputs.question,image:Vi(new Uint8Array(e.inputs.image instanceof ArrayBuffer?e.inputs.image:await e.inputs.image.arrayBuffer()))}},r=(o=await $(n,t))==null?void 0:o[0];if(!(typeof(r==null?void 0:r.answer)=="string"&&typeof r.score=="number"))throw new Q("Expected Array<{answer: string, score: number}>");return r}async function Dc(e,t){const n=await $(e,t);if(!(Array.isArray(n)&&n.every(l=>typeof l=="number")))throw new Q("Expected number[]");return n}const O=e=>a.jsx("button",{className:`${e.variant==="secondary"?"border-4 border-yellow-200":"bg-yellow-200"} py-6 text-center w-full ${e.disabled?"cursor-not-allowed opacity-50":""}`,disabled:e.disabled??!1,onClick:e.onClick,children:e.label??"Submit"}),$c=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Input"}),e.input?a.jsx("audio",{className:"w-full",controls:!0,src:URL.createObjectURL(e.input)}):a.jsxs("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",a.jsx("input",{accept:"audio/*",className:"hidden",onChange:t=>{t.target.files&&t.target.files[0]&&e.setInput(t.target.files[0])},type:"file"})]})]}),P=e=>{const t=(()=>{try{return JSON.stringify(e.output,void 0,2)}catch(n){if(n instanceof Error)return`Error during JSON.stringify: ${n.message}`}})();return a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Output"}),a.jsx("pre",{className:`bg-yellow-200 break-words p-6 select-text w-full whitespace-pre-wrap ${e.disabled?"cursor-wait opacity-50":""}`,children:t})]})},Ip="audio-classification",zp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await yc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx($c,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},Fp="automatic-speech-recognition",Rp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await hc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return 
a.jsxs(p.Fragment,{children:[a.jsx($c,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},J=e=>{const t=p.useRef(null);return p.useLayoutEffect(()=>{t.current&&(t.current.style.height="inherit",t.current.style.height=`${t.current.scrollHeight}px`)},[e.input]),a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Input"}),a.jsx("textarea",{className:"bg-yellow-200 py-6 resize-none text-center w-full",disabled:e.disabled??!1,onChange:n=>{!e.disabled&&e.setInput&&(n.target.value?e.setInput(n.target.value):e.setInput(""))},ref:t,rows:1,style:{height:t.current?`${t.current.scrollHeight}px`:"inherit"},value:e.input??""})]})},Ap="conversational",Mp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=()=>{t&&(l(!0),s(c=>c?{...c,conversation:{...c.conversation,past_user_inputs:[...c.conversation.past_user_inputs,t]}}:{conversation:{generated_responses:[],past_user_inputs:[t]},generated_text:"",warnings:[]}),n(void 0),Cc({inputs:{generated_responses:u==null?void 0:u.conversation.generated_responses,past_user_inputs:u==null?void 0:u.conversation.past_user_inputs,text:t},model:e.model}).then(s).catch(i).finally(()=>l(!1)))};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t&&!u,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?Array.from({length:Math.max(u.conversation.generated_responses.length,u.conversation.past_user_inputs.length)}).map((c,g,v)=>a.jsxs(p.Fragment,{children:[u.conversation.generated_responses[v.length-g-1]?a.jsx(P,{disabled:r,label:`Output - Generated Response #${v.length-g}`,output:u.conversation.generated_responses[v.length-g-1]}):a.jsx(p.Fragment,{}),u.conversation.past_user_inputs[v.length-g-1]?a.jsx(J,{disabled:!0,label:`Output - Past User Input #${v.length-g}`,input:u.conversation.past_user_inputs[v.length-g-1]}):a.jsx(p.Fragment,{})]},g)):a.jsx(p.Fragment,{})]})},Mt=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Input"}),e.input?a.jsx("img",{className:"w-full",src:URL.createObjectURL(e.input)}):a.jsxs("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",a.jsx("input",{accept:"image/*",className:"hidden",onChange:t=>{t.target.files&&t.target.files[0]&&e.setInput(t.target.files[0])},type:"file"})]})]}),Dp="document-question-answering",$p=e=>{const[t,n]=p.useState(),[r,l]=p.useState(),[o,i]=p.useState(!1),[u,s]=p.useState(),[d,y]=p.useState(),c=()=>{n(void 0),l(void 0),s(void 0),y(void 0)},g=async()=>{if(t&&r){i(!0);try{const v=await Ac({inputs:{question:t,image:r},model:e.model});y(v)}catch(v){v instanceof Error&&s(v)}finally{i(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Question",setInput:n}),a.jsx(Mt,{input:r,label:"Input - 
Image",setInput:l}),a.jsx(O,{label:"Clear",disabled:o||!r,onClick:c,variant:"secondary"}),a.jsx(O,{disabled:o||!r,onClick:g}),!o&&u?a.jsx(P,{disabled:o,label:"Error",output:u.message}):a.jsx(p.Fragment,{}),!u&&d?a.jsx(P,{disabled:o,output:d}):a.jsx(p.Fragment,{})]})},Up="feature-extraction",Vp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await jc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},Bp="fill-mask",Qp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await _c({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.token_str)):a.jsx(p.Fragment,{})]})},Hp="image-classification",Wp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await vc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(Mt,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},Kp="image-segmentation",Yp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await wc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(Mt,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},Uc=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Output"}),a.jsx("img",{className:`w-full ${e.disabled?"cursor-wait opacity-50":""}`,src:URL.createObjectURL(e.output)})]}),Xp="image-to-image",Gp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await Ec({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return 
a.jsxs(p.Fragment,{children:[a.jsx(Mt,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(Uc,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},qp="image-to-text",Zp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await Sc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(Mt,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},Jp="object-detection",bp=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await xc({data:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(Mt,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},em="question-answering",tm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(),[o,i]=p.useState(!1),[u,s]=p.useState(),[d,y]=p.useState(),c=()=>{n(void 0),l(void 0),s(void 0),y(void 0)},g=async()=>{if(t&&r){i(!0);try{const v=await Nc({inputs:{question:t,context:r},model:e.model});y(v)}catch(v){v instanceof Error&&s(v)}finally{i(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Question",setInput:n}),a.jsx(J,{input:r,label:"Input - Context",setInput:l}),a.jsx(O,{label:"Clear",disabled:o||!t||!r,onClick:c,variant:"secondary"}),a.jsx(O,{disabled:o||!t||!r,onClick:g}),!o&&u?a.jsx(P,{disabled:o,label:"Error",output:u.message}):a.jsx(p.Fragment,{}),!u&&d?a.jsx(P,{disabled:o,output:d}):a.jsx(p.Fragment,{})]})},nm="sentence-similarity",rm=e=>{const[t,n]=p.useState(),r=Array.from({length:2}).map(()=>{}),[l,o]=p.useState(r),[i,u]=p.useState(!1),[s,d]=p.useState(),[y,c]=p.useState(),g=()=>{n(void 0),o(r),d(void 0),c(void 0)},v=async()=>{if(t&&l.every(Boolean)){u(!0);try{const w=await Tc({inputs:{source_sentence:t,sentences:l},model:e.model});c(w)}catch(w){w instanceof Error&&d(w)}finally{u(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Source Sentence",setInput:n}),l.map((w,k)=>a.jsx(J,{input:w,label:`Input - Sentence #${k+1}`,setInput:M=>o(m=>[...m.slice(0,k),M,...m.slice(k+1,m.length)])})),a.jsx(O,{disabled:i||!t||!l.every(Boolean),label:"Add Sentence",onClick:()=>o(w=>[...w,void 0])}),a.jsx(O,{disabled:i||!t||!l.every(Boolean),label:"Clear",onClick:g,variant:"secondary"}),a.jsx(O,{disabled:i||!t||!l.every(Boolean),onClick:v}),!i&&s?a.jsx(P,{disabled:i,label:"Error",output:s.message}):a.jsx(p.Fragment,{}),!s&&y?y.map((w,k)=>a.jsx(P,{disabled:i,label:`Output - Sentence #${k+1}`,output:w})):a.jsx(p.Fragment,{})]})},lm="summarization",om=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await Oc({inputs:t,model:e.model});s(c)}catch(c){c instanceof 
Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},im=async e=>{const t=await e.text();try{const n=JSON.parse(t);try{return JSON.stringify(n,void 0,2)}catch(r){if(r instanceof Error)return`Error during JSON.stringify: ${r.message}`}}catch(n){if(n instanceof Error)return`Error during JSON.parse: ${n.message}`}},Vc=e=>{const[t,n]=p.useState();return p.useEffect(()=>{e.input&&im(e.input).then(n)},[e.input]),a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Input"}),e.input?a.jsx("pre",{className:"bg-yellow-200 break-words p-6 select-text w-full whitespace-pre-wrap",children:t}):a.jsxs("label",{className:"bg-yellow-200 block cursor-pointer py-6 text-center w-full",children:["No file chosen",a.jsx("input",{accept:".json",className:"hidden",onChange:r=>{r.target.files&&r.target.files[0]&&e.setInput(r.target.files[0])},type:"file"})]})]})},um="table-question-answering",sm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(),[o,i]=p.useState(!1),[u,s]=p.useState(),[d,y]=p.useState(),c=()=>{n(void 0),l(void 0),s(void 0),y(void 0)},g=async()=>{if(t&&r){i(!0);try{const v=await Pc({inputs:{query:t,table:JSON.parse(await r.text()??"{}")},model:e.model});y(v)}catch(v){v instanceof Error&&s(v)}finally{i(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Query",setInput:n}),a.jsx(Vc,{input:r,label:"Input - Table",setInput:l}),a.jsx(O,{label:"Clear",disabled:o||!t,onClick:c,variant:"secondary"}),a.jsx(O,{disabled:o||!t,onClick:g}),!o&&u?a.jsx(P,{disabled:o,label:"Error",output:u.message}):a.jsx(p.Fragment,{}),!u&&d?a.jsx(P,{disabled:o,output:d}):a.jsx(p.Fragment,{})]})},am="tabular-regression",cm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await Dc({inputs:{data:JSON.parse(await t.text()??"{}")},model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(Vc,{input:t,setInput:n}),a.jsx(O,{disabled:r||!t,label:"Clear",onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?u.map((c,g)=>a.jsx(P,{disabled:r,label:`Output - Sentence #${g+1}`,output:c})):a.jsx(p.Fragment,{})]})},dm="text-classification",fm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await Lc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.label)):a.jsx(p.Fragment,{})]})},pm="text-generation",mm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await Ic({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return 
a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},ym="text-to-image",hm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await kc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(Uc,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},gm=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:e.label??"Output"}),a.jsx("audio",{className:`w-full ${e.disabled?"cursor-wait opacity-50":""}`,controls:!0,src:URL.createObjectURL(e.output)})]}),vm="text-to-speech",wm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await gc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(gm,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},Sm="token-classification",xm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await zc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?u.map(c=>a.jsx(P,{disabled:r,output:c},c.word)):a.jsx(p.Fragment,{})]})},km="translation",Em=e=>{const[t,n]=p.useState(),[r,l]=p.useState(!1),[o,i]=p.useState(),[u,s]=p.useState(),d=()=>{n(void 0),i(void 0),s(void 0)},y=async()=>{if(t){l(!0);try{const c=await Fc({inputs:t,model:e.model});s(c)}catch(c){c instanceof Error&&i(c)}finally{l(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),a.jsx(O,{label:"Clear",disabled:r||!t,onClick:d,variant:"secondary"}),a.jsx(O,{disabled:r||!t,onClick:y}),!r&&o?a.jsx(P,{disabled:r,label:"Error",output:o.message}):a.jsx(p.Fragment,{}),!o&&u?a.jsx(P,{disabled:r,output:u}):a.jsx(p.Fragment,{})]})},Cm="visual-question-answering",jm=e=>{const[t,n]=p.useState(),[r,l]=p.useState(),[o,i]=p.useState(!1),[u,s]=p.useState(),[d,y]=p.useState(),c=()=>{n(void 0),l(void 0),s(void 0),y(void 0)},g=async()=>{if(t&&r){i(!0);try{const v=await Mc({inputs:{question:t,image:r},model:e.model});y(v)}catch(v){v instanceof Error&&s(v)}finally{i(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,label:"Input - Question",setInput:n}),a.jsx(Mt,{input:r,label:"Input - 
Image",setInput:l}),a.jsx(O,{label:"Clear",disabled:o||!r,onClick:c,variant:"secondary"}),a.jsx(O,{disabled:o||!r,onClick:g}),!o&&u?a.jsx(P,{disabled:o,label:"Error",output:u.message}):a.jsx(p.Fragment,{}),!u&&d?a.jsx(P,{disabled:o,output:d}):a.jsx(p.Fragment,{})]})},_m="zero-shot-classification",Nm=e=>{const[t,n]=p.useState(),r=Array.from({length:2}).map(()=>{}),[l,o]=p.useState(r),[i,u]=p.useState(!1),[s,d]=p.useState(),[y,c]=p.useState(),g=()=>{n(void 0),o(r),d(void 0),c(void 0)},v=async()=>{if(t&&l.every(Boolean)){u(!0);try{const w=await Rc({inputs:t,model:e.model,parameters:{candidate_labels:l}});c(w)}catch(w){w instanceof Error&&d(w)}finally{u(!1)}}};return a.jsxs(p.Fragment,{children:[a.jsx(J,{input:t,setInput:n}),l.map((w,k)=>a.jsx(J,{input:w,label:`Parameter - Candidate Label #${k+1}`,setInput:M=>o(m=>[...m.slice(0,k),M,...m.slice(k+1,m.length)])})),a.jsx(O,{disabled:i||!t||!l.every(Boolean),label:"Add Candidate Label",onClick:()=>o(w=>[...w,void 0])}),a.jsx(O,{disabled:i||!t||!l.every(Boolean),label:"Clear",onClick:g,variant:"secondary"}),a.jsx(O,{disabled:i||!t||!l.every(Boolean),onClick:v}),!i&&s?a.jsx(P,{disabled:i,label:"Error",output:s.message}):a.jsx(p.Fragment,{}),!s&&y?y.map((w,k)=>a.jsx(P,{disabled:i,output:w})):a.jsx(p.Fragment,{})]})},Tm=[Ip,Fp,Ap,Dp,Up,Bp,Hp,Kp,Xp,qp,Jp,em,nm,lm,um,am,dm,pm,ym,vm,Sm,km,Cm,_m],Om=e=>{if(!e.model||!e.task)return a.jsx(p.Fragment,{});switch(e.task){case"audio-classification":return a.jsx(zp,{model:e.model});case"automatic-speech-recognition":return a.jsx(Rp,{model:e.model});case"conversational":return a.jsx(Mp,{model:e.model});case"document-question-answering":return a.jsx($p,{model:e.model});case"feature-extraction":return a.jsx(Vp,{model:e.model});case"fill-mask":return a.jsx(Qp,{model:e.model});case"image-classification":return a.jsx(Wp,{model:e.model});case"image-segmentation":return a.jsx(Yp,{model:e.model});case"image-to-image":return a.jsx(Gp,{model:e.model});case"image-to-text":return a.jsx(Zp,{model:e.model});case"object-detection":return a.jsx(bp,{model:e.model});case"question-answering":return a.jsx(tm,{model:e.model});case"sentence-similarity":return a.jsx(rm,{model:e.model});case"summarization":return a.jsx(om,{model:e.model});case"table-question-answering":return a.jsx(sm,{model:e.model});case"tabular-regression":return a.jsx(cm,{model:e.model});case"text-classification":return a.jsx(fm,{model:e.model});case"text-generation":return a.jsx(mm,{model:e.model});case"text-to-image":return a.jsx(hm,{model:e.model});case"text-to-speech":return a.jsx(wm,{model:e.model});case"token-classification":return a.jsx(xm,{model:e.model});case"translation":return a.jsx(Em,{model:e.model});case"visual-question-answering":return a.jsx(jm,{model:e.model});case"zero-shot-classification":return a.jsx(Nm,{model:e.model});default:return a.jsx(p.Fragment,{})}},Pm=e=>a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:"Task"}),a.jsxs("select",{className:"bg-yellow-200 cursor-pointer py-6 text-center w-full",onChange:t=>e.onTaskSelect(t.target.value),placeholder:"Select a task",value:e.task,children:[a.jsx("option",{children:"Select a task"}),Tm.map(t=>a.jsx("option",{value:t,children:t},t))]})]}),Jl={},Lm=async e=>{if(Jl[e])return Jl[e];const t=[];for await(const n of Ep({search:{task:e}}))t.push(n);return t.sort((n,r)=>n.downloads>r.downloads?-1:n.downloadsr.likes?-1:n.likesr.name?-1:n.name{const[t,n]=p.useState(!1),[r,l]=p.useState([]);return 
p.useEffect(()=>{l([]),e.task&&(n(!0),Lm(e.task).then(o=>l(o)).finally(()=>n(!1)))},[e.task]),r.length>0?a.jsxs("div",{className:"w-full",children:[a.jsx("p",{className:"text-xl",children:"Model"}),a.jsxs("select",{className:"bg-yellow-200 cursor-pointer py-6 text-center w-full",onChange:o=>e.onModelSelect(o.target.value),placeholder:"Select a model",value:e.model,children:[a.jsx("option",{children:"Select a model"}),r.map(o=>a.jsx("option",{value:o.name,children:o.name},o.name))]}),e.model?a.jsx("div",{className:"font-bold py-6 text-center text-yellow-200",children:a.jsx("a",{href:`https://huggingface.co/${e.model}`,rel:"noopener noferrer",target:"_blank",children:"View model on 🤗"})}):a.jsx(p.Fragment,{})]}):a.jsx("p",{className:"text-center w-full",children:e.task?t?"Loading models for this task":"No models available for this task":"Select a task to view available models"})},zm=()=>{const[e,t]=p.useState(),[n,r]=p.useState(),l=o=>{r(void 0),t(o)};return a.jsx("div",{className:"bg-yellow-500 flex flex-col h-full items-center min-h-screen min-w-screen overflow-auto w-full",children:a.jsxs("div",{className:"flex flex-col items-center justify-center py-24 space-y-12 w-2/3 lg:w-1/3",children:[a.jsx("header",{className:"text-center text-6xl",children:"🤗"}),a.jsx(Pm,{onTaskSelect:l,task:e}),a.jsx(Im,{model:n,onModelSelect:r,task:e}),a.jsx(Om,{model:n,task:e})]})})};const Fm=()=>{const e="root",t=document.getElementById(e);if(t){const n=pc(t),r=a.jsx(p.StrictMode,{children:a.jsx(zm,{})});n.render(r)}};Fm(); diff --git a/spaces/amanatid/Melissa_The_PubMedGPT_with_Voice_and_featuring_answers/README.md b/spaces/amanatid/Melissa_The_PubMedGPT_with_Voice_and_featuring_answers/README.md deleted file mode 100644 index b464f0eaa92c657c99f84b12a5dd19825d821e1e..0000000000000000000000000000000000000000 --- a/spaces/amanatid/Melissa_The_PubMedGPT_with_Voice_and_featuring_answers/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Melissa The PubMedGPT With Voice And Featuring Answers -emoji: 🔥 -colorFrom: indigo -colorTo: indigo -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/include/pa_mac_core.h b/spaces/amarchheda/ChordDuplicate/portaudio/include/pa_mac_core.h deleted file mode 100644 index beb539619a19e5025b6874fc5cbdc0b0b704557b..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/include/pa_mac_core.h +++ /dev/null @@ -1,191 +0,0 @@ -#ifndef PA_MAC_CORE_H -#define PA_MAC_CORE_H -/* - * PortAudio Portable Real-Time Audio Library - * Macintosh Core Audio specific extensions - * portaudio.h should be included before this file. - * - * Copyright (c) 2005-2006 Bjorn Roche - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** @file - * @ingroup public_header - * @brief CoreAudio-specific PortAudio API extension header file. - */ - -#include "portaudio.h" - -#include <AudioUnit/AudioUnit.h> -#include <AudioToolbox/AudioToolbox.h> - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * A pointer to a paMacCoreStreamInfo may be passed as - * the hostApiSpecificStreamInfo in the PaStreamParameters struct - * when opening a stream or querying the format. Use NULL for the - * defaults. Note that for duplex streams, flags for input and output - * should be the same or behaviour is undefined. - */ -typedef struct -{ - unsigned long size; /**size of whole structure including this header */ - PaHostApiTypeId hostApiType; /**host API for which this data is intended */ - unsigned long version; /**structure version */ - unsigned long flags; /** flags to modify behaviour */ - SInt32 const * channelMap; /** Channel map for HAL channel mapping , if not needed, use NULL;*/ - unsigned long channelMapSize; /** Channel map size for HAL channel mapping , if not needed, use 0;*/ -} PaMacCoreStreamInfo; - -/** - * Functions - */ - - -/** Use this function to initialize a paMacCoreStreamInfo struct - * using the requested flags. Note that channel mapping is turned - * off after a call to this function. - * @param data The data structure to initialize - * @param flags The flags to initialize the data structure with. -*/ -void PaMacCore_SetupStreamInfo( PaMacCoreStreamInfo *data, unsigned long flags ); - -/** Call this after PaMacCore_SetupStreamInfo to use channel mapping as described in notes.txt. - * @param data The stream info structure to assign a channel mapping to - * @param channelMap The channel map array, as described in notes.txt. This array pointer will be used directly (ie the underlying data will not be copied), so the caller should not free the array until after the stream has been opened. - * @param channelMapSize The size of the channel map array. - */ -void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, unsigned long channelMapSize ); -
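/* Editor's sketch, not part of the original header: a typical way to combine
 * the two setup calls above. The channel-map convention (-1 silences a device
 * channel) follows notes.txt; paMacCorePlayNice is defined later in this file.
 *
 *     PaMacCoreStreamInfo macInfo;
 *     SInt32 map[4] = { -1, -1, 0, 1 };   // play stereo on device channels 3 and 4
 *     PaMacCore_SetupStreamInfo( &macInfo, paMacCorePlayNice );
 *     PaMacCore_SetupChannelMap( &macInfo, map, 4 );
 *     // then set PaStreamParameters.hostApiSpecificStreamInfo = &macInfo before Pa_OpenStream()
 */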
-/** - * Retrieve the AudioDeviceID of the input device assigned to an open stream - * - * @param s The stream to query. - * - * @return A valid AudioDeviceID, or NULL if an error occurred. - */ -AudioDeviceID PaMacCore_GetStreamInputDevice( PaStream* s ); - -/** - * Retrieve the AudioDeviceID of the output device assigned to an open stream - * - * @param s The stream to query. - * - * @return A valid AudioDeviceID, or NULL if an error occurred. - */ -AudioDeviceID PaMacCore_GetStreamOutputDevice( PaStream* s ); - -/** - * Returns a statically allocated string with the device's name - * for the given channel. NULL will be returned on failure. - * - * This function's implementation is not complete! - * - * @param device The PortAudio device index. - * @param channelIndex The channel number whose name is requested. - * @return a statically allocated string with the name of the device. - * Because this string is statically allocated, it must be - * copied if it is to be saved and used by the user after - * another call to this function. - * - */ -const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input ); - - -/** Retrieve the range of legal native buffer sizes for the specified device, in sample frames. - - @param device The global index of the PortAudio device about which the query is being made. - @param minBufferSizeFrames A pointer to the location which will receive the minimum buffer size value. - @param maxBufferSizeFrames A pointer to the location which will receive the maximum buffer size value. - - @see kAudioDevicePropertyBufferFrameSizeRange in the CoreAudio SDK. - */ -PaError PaMacCore_GetBufferSizeRange( PaDeviceIndex device, - long *minBufferSizeFrames, long *maxBufferSizeFrames ); - - -/** - * Flags - */ - -/** - * The following flags alter the behaviour of PA on the mac platform. - * They can be ORed together. These should work both for opening and - * checking a device. - */ - -/** Allows PortAudio to change things like the device's frame size, - * which allows for much lower latency, but might disrupt the device - * if other programs are using it, even when you are just querying - * the device. */ -#define paMacCoreChangeDeviceParameters (0x01) - -/** In combination with the above flag, - * causes the stream opening to fail, unless the exact sample rates - * are supported by the device. */ -#define paMacCoreFailIfConversionRequired (0x02) - -/** These flags set the SR conversion quality, if required. The weird ordering - * allows Maximum Quality to be the default.*/ -#define paMacCoreConversionQualityMin (0x0100) -#define paMacCoreConversionQualityMedium (0x0200) -#define paMacCoreConversionQualityLow (0x0300) -#define paMacCoreConversionQualityHigh (0x0400) -#define paMacCoreConversionQualityMax (0x0000) - -/** - * Here are some "preset" combinations of flags (above) to get to some - * common configurations. THIS IS OVERKILL, but if more flags are added - * it won't be. - */ - -/**This is the default setting: do as much sample rate conversion as possible - * and as little mucking with the device as possible. */ -#define paMacCorePlayNice (0x00) -/**This setting is tuned for pro audio apps. It allows SR conversion on input - and output, but it tries to set the appropriate SR on the device.*/ -#define paMacCorePro (0x01) -/**This is a setting to minimize CPU usage and still play nice.*/ -#define paMacCoreMinimizeCPUButPlayNice (0x0100) -/**This is a setting to minimize CPU usage, even if that means interrupting the device. 
*/ -#define paMacCoreMinimizeCPU (0x0101) - - -#ifdef __cplusplus -} -#endif /** __cplusplus */ - -#endif /** PA_MAC_CORE_H */ diff --git a/spaces/ankitinter9/my-draw-self-journey/share_btn.py b/spaces/ankitinter9/my-draw-self-journey/share_btn.py deleted file mode 100644 index fdb978b7f9bfb098fc824041c97db75559debfde..0000000000000000000000000000000000000000 --- a/spaces/ankitinter9/my-draw-self-journey/share_btn.py +++ /dev/null @@ -1,69 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - async function getInputImageFile(imageEl){ - const res = await fetch(imageEl.src); - const blob = await res.blob(); - const imageId = Date.now(); - const fileName = `rich-text-image-${{imageId}}.png`; - return new File([blob], fileName, { type: 'image/png'}); - } - const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app'); - const negative_prompt = gradioEl.querySelector('#negative-prompt-text-input input').value; - const prompt = gradioEl.querySelector('#prompt-text-input input').value; - const upscaledImage = gradioEl.querySelector('#upscaled-image img'); - - const titleTxt = `DeepFloyd IF: ${prompt.slice(0, 50)}...`; - - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!upscaledImage){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const upscaledImageFile = await getInputImageFile(upscaledImage); - const upscaledImageURL = await uploadFile(upscaledImageFile); - - const descriptionMd = ` -### Prompt -${prompt} - -### Negative Prompt -${negative_prompt} - -### Upscaled Image -Upscaled Image - -`; - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/DeepFloyd/IF/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" diff --git a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/extensions/silero_tts/test_tts.py b/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/extensions/silero_tts/test_tts.py deleted file mode 100644 index ebc2c102a9ef29f21141429232f957421989cdd4..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/extensions/silero_tts/test_tts.py +++ /dev/null @@ -1,81 +0,0 @@ -import time -from pathlib import Path - -import torch -import tts_preprocessor - -torch._C._jit_set_profiling_mode(False) - - -params = { - 'activate': True, - 'speaker': 'en_49', - 'language': 'en', - 'model_id': 'v3_en', - 'sample_rate': 48000, - 'device': 'cpu', - 'show_text': True, - 'autoplay': True, - 'voice_pitch': 'medium', - 'voice_speed': 'medium', -} - -current_params = params.copy() -voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 
'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115'] -voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high'] -voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast'] - -# Used for making text xml compatible, needed for voice pitch and speed control -table = str.maketrans({ - "<": "&lt;", - ">": "&gt;", - "&": "&amp;", - "'": "&apos;", - '"': "&quot;", -}) - - -def xmlesc(txt): - return txt.translate(table) - - -def load_model(): - model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id']) - model.to(params['device']) - return model - - -model = load_model() - - -def output_modifier(string): - """ - This function is applied to the model outputs. - """ - - global model, current_params - - original_string = string - string = tts_preprocessor.preprocess(string) - processed_string = string - - if string == '': - string = '*Empty reply, try regenerating*' - else: - output_file = Path(f'extensions/silero_tts/outputs/test_{int(time.time())}.wav') - prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch']) - silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>' - model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file)) - - autoplay = 'autoplay' if params['autoplay'] else '' - string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>' - - if params['show_text']: - string += f'\n\n{original_string}\n\nProcessed:\n{processed_string}' - - print(string) - - -if __name__ == '__main__': - import sys - output_modifier(sys.argv[1])
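Editor's sketch (hedged; not part of the diff above): the essence of test_tts.py is wrapping XML-escaped text in an SSML prosody tag and handing it to Silero's save_wav(). A minimal standalone version, mirroring the torch.hub and save_wav() calls used above:

import torch

# Load the Silero TTS model the same way load_model() does above.
model, _ = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts',
                          language='en', speaker='v3_en')
model.to('cpu')

# Wrap the (already XML-escaped) text in a prosody tag, as output_modifier() does.
ssml = '<speak><prosody rate="medium" pitch="medium">Hello there!</prosody></speak>'
model.save_wav(ssml_text=ssml, speaker='en_49', sample_rate=48000, audio_path='sample.wav')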
diff --git a/spaces/arch-123/bingo/src/components/ui/dialog.tsx b/spaces/arch-123/bingo/src/components/ui/dialog.tsx deleted file mode 100644 index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000 --- a/spaces/arch-123/bingo/src/components/ui/dialog.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DialogPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' - -import { IconClose } from '@/components/ui/icons' - -const Dialog = DialogPrimitive.Root - -const DialogTrigger = DialogPrimitive.Trigger - -const DialogPortal = ({ - className, - children, - ...props -}: DialogPrimitive.DialogPortalProps) => ( -
- {children} -
      -
      -) -DialogPortal.displayName = DialogPrimitive.Portal.displayName - -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName - -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName - -const DialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -DialogHeader.displayName = 'DialogHeader' - -const DialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -DialogFooter.displayName = 'DialogFooter' - -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName - -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName - -export { - Dialog, - DialogTrigger, - DialogContent, - DialogHeader, - DialogFooter, - DialogTitle, - DialogDescription -} diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/bark.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/bark.py deleted file mode 100644 index e5edffd4ef4150b47d1ad7da5a705ab4f44ed889..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/bark.py +++ /dev/null @@ -1,284 +0,0 @@ -import os -from dataclasses import dataclass -from typing import Optional - -import numpy as np -from coqpit import Coqpit -from encodec import EncodecModel -from transformers import BertTokenizer - -from TTS.tts.layers.bark.inference_funcs import ( - codec_decode, - generate_coarse, - generate_fine, - generate_text_semantic, - generate_voice, - load_voice, -) -from TTS.tts.layers.bark.load_model import load_model -from TTS.tts.layers.bark.model import GPT -from TTS.tts.layers.bark.model_fine import FineGPT -from TTS.tts.models.base_tts import BaseTTS - - -@dataclass -class BarkAudioConfig(Coqpit): - sample_rate: int = 24000 - output_sample_rate: int = 24000 - - -class Bark(BaseTTS): - def __init__( - self, - config: Coqpit, - tokenizer: BertTokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased"), - ) -> None: - super().__init__(config=config, ap=None, tokenizer=None, speaker_manager=None, language_manager=None) - self.config.num_chars = len(tokenizer) - self.tokenizer = tokenizer - self.semantic_model = GPT(config.semantic_config) - self.coarse_model = GPT(config.coarse_config) - self.fine_model = FineGPT(config.fine_config) - self.encodec = EncodecModel.encodec_model_24khz() - self.encodec.set_target_bandwidth(6.0) - - @property - def device(self): - return next(self.parameters()).device - - def load_bark_models(self): - self.semantic_model, self.config = load_model( - ckpt_path=self.config.LOCAL_MODEL_PATHS["text"], device=self.device, config=self.config, model_type="text" - ) - self.coarse_model, self.config = load_model( - ckpt_path=self.config.LOCAL_MODEL_PATHS["coarse"], - device=self.device, - config=self.config, - model_type="coarse", - ) - self.fine_model, self.config = load_model( - ckpt_path=self.config.LOCAL_MODEL_PATHS["fine"], device=self.device, config=self.config, model_type="fine" - ) - - def train_step( - self, - ): - pass - - def text_to_semantic( - self, - text: str, - history_prompt: Optional[str] = None, - temp: float = 0.7, - base=None, - allow_early_stop=True, - **kwargs, - ): - """Generate semantic array from text. 
-
-        Args:
-            text: text to be turned into audio
-            history_prompt: history choice for audio cloning
-            temp: generation temperature (1.0 more diverse, 0.0 more conservative)
-
-        Returns:
-            numpy semantic array to be fed into `semantic_to_waveform`
-        """
-        x_semantic = generate_text_semantic(
-            text,
-            self,
-            history_prompt=history_prompt,
-            temp=temp,
-            base=base,
-            allow_early_stop=allow_early_stop,
-            **kwargs,
-        )
-        return x_semantic
-
-    def semantic_to_waveform(
-        self,
-        semantic_tokens: np.ndarray,
-        history_prompt: Optional[str] = None,
-        temp: float = 0.7,
-        base=None,
-    ):
-        """Generate audio array from semantic input.
-
-        Args:
-            semantic_tokens: semantic token output from `text_to_semantic`
-            history_prompt: history choice for audio cloning
-            temp: generation temperature (1.0 more diverse, 0.0 more conservative)
-
-        Returns:
-            numpy audio array at a 24 kHz sample rate
-        """
-        x_coarse_gen = generate_coarse(
-            semantic_tokens,
-            self,
-            history_prompt=history_prompt,
-            temp=temp,
-            base=base,
-        )
-        x_fine_gen = generate_fine(
-            x_coarse_gen,
-            self,
-            history_prompt=history_prompt,
-            temp=0.5,
-            base=base,
-        )
-        audio_arr = codec_decode(x_fine_gen, self)
-        return audio_arr, x_coarse_gen, x_fine_gen
-
-    def generate_audio(
-        self,
-        text: str,
-        history_prompt: Optional[str] = None,
-        text_temp: float = 0.7,
-        waveform_temp: float = 0.7,
-        base=None,
-        allow_early_stop=True,
-        **kwargs,
-    ):
-        """Generate audio array from input text.
-
-        Args:
-            text: text to be turned into audio
-            history_prompt: history choice for audio cloning
-            text_temp: generation temperature for the semantic stage (1.0 more diverse, 0.0 more conservative)
-            waveform_temp: generation temperature for the waveform stage (1.0 more diverse, 0.0 more conservative)
-
-        Returns:
-            numpy audio array at a 24 kHz sample rate
-        """
-        x_semantic = self.text_to_semantic(
-            text,
-            history_prompt=history_prompt,
-            temp=text_temp,
-            base=base,
-            allow_early_stop=allow_early_stop,
-            **kwargs,
-        )
-        audio_arr, c, f = self.semantic_to_waveform(
-            x_semantic, history_prompt=history_prompt, temp=waveform_temp, base=base
-        )
-        return audio_arr, [x_semantic, c, f]
-
-    def generate_voice(self, audio, speaker_id, voice_dir):
-        """Generate a voice from the given audio.
-
-        Args:
-            audio (str): Path to the audio file.
-            speaker_id (str): Speaker name.
-            voice_dir (str): Path to the directory to save the generated voice.
-        """
-        if voice_dir is not None:
-            voice_dirs = [voice_dir]
-            try:
-                _ = load_voice(self, speaker_id, voice_dirs)
-            except (KeyError, FileNotFoundError):
-                output_path = os.path.join(voice_dir, speaker_id + ".npz")
-                os.makedirs(voice_dir, exist_ok=True)
-                generate_voice(audio, self, output_path)
-
-    def _set_voice_dirs(self, voice_dirs):
-        def_voice_dir = None
-        if isinstance(self.config.DEF_SPEAKER_DIR, str):
-            os.makedirs(self.config.DEF_SPEAKER_DIR, exist_ok=True)
-            if os.path.isdir(self.config.DEF_SPEAKER_DIR):
-                def_voice_dir = self.config.DEF_SPEAKER_DIR
-        _voice_dirs = [def_voice_dir] if def_voice_dir is not None else []
-        if voice_dirs is not None:
-            if isinstance(voice_dirs, str):
-                voice_dirs = [voice_dirs]
-            _voice_dirs = voice_dirs + _voice_dirs
-        return _voice_dirs
-
-    # TODO: remove config from synthesize
-    def synthesize(
-        self, text, config, speaker_id="random", voice_dirs=None, **kwargs
-    ):  # pylint: disable=unused-argument
-        """Synthesize speech with the given input text.
-
-        Args:
-            text (str): Input text.
-            config (BarkConfig): Config with inference parameters.
-            speaker_id (str): One of the available speaker names.
-                If `random`, it generates a random speaker.
-            speaker_wav (str): Path to the speaker audio file for cloning a new voice. It is cloned and saved in
-                `voice_dirs` with the name `speaker_id`. Defaults to None.
-            voice_dirs (List[str]): List of paths that host reference audio files for speakers. Defaults to None.
-            **kwargs: Model specific inference settings used by `generate_audio()` and
-                `TTS.tts.layers.bark.inference_funcs.generate_text_semantic()`.
-
-        Returns:
-            A dictionary of the output values, with `wav` as the output waveform and `text_inputs` as the
-            input text.
-        """
-        speaker_id = "random" if speaker_id is None else speaker_id
-        voice_dirs = self._set_voice_dirs(voice_dirs)
-        history_prompt = load_voice(self, speaker_id, voice_dirs)
-        outputs = self.generate_audio(text, history_prompt=history_prompt, **kwargs)
-        return_dict = {
-            "wav": outputs[0],
-            "text_inputs": text,
-        }
-
-        return return_dict
-
-    def eval_step(self):
-        ...
-
-    def forward(self):
-        ...
-
-    def inference(self):
-        ...
-
-    @staticmethod
-    def init_from_config(config: "BarkConfig", **kwargs):  # pylint: disable=unused-argument
-        return Bark(config)
-
-    # pylint: disable=unused-argument, redefined-builtin
-    def load_checkpoint(
-        self,
-        config,
-        checkpoint_dir,
-        text_model_path=None,
-        coarse_model_path=None,
-        fine_model_path=None,
-        hubert_model_path=None,
-        hubert_tokenizer_path=None,
-        eval=False,
-        strict=True,
-        **kwargs,
-    ):
-        """Load the model checkpoints from a directory. This model uses multiple checkpoint files, and it
-        expects all of them to be under the given `checkpoint_dir` with the right names.
-        If `eval` is True, set the model to eval mode.
-
-        Args:
-            config (BarkConfig): The model config.
-            checkpoint_dir (str): The directory where the checkpoints are stored.
-            text_model_path (str, optional): The path to the text (semantic) model checkpoint. Defaults to None.
-            coarse_model_path (str, optional): The path to the coarse model checkpoint. Defaults to None.
-            fine_model_path (str, optional): The path to the fine model checkpoint. Defaults to None.
-            hubert_model_path (str, optional): The path to the HuBERT model checkpoint. Defaults to None.
-            hubert_tokenizer_path (str, optional): The path to the HuBERT tokenizer checkpoint. Defaults to None.
-            eval (bool, optional): Whether to set the model to eval mode. Defaults to False.
-            strict (bool, optional): Whether to load the model strictly. Defaults to True.
- """ - text_model_path = text_model_path or os.path.join(checkpoint_dir, "text_2.pt") - coarse_model_path = coarse_model_path or os.path.join(checkpoint_dir, "coarse_2.pt") - fine_model_path = fine_model_path or os.path.join(checkpoint_dir, "fine_2.pt") - hubert_model_path = hubert_model_path or os.path.join(checkpoint_dir, "hubert.pt") - hubert_tokenizer_path = hubert_tokenizer_path or os.path.join(checkpoint_dir, "tokenizer.pth") - - self.config.LOCAL_MODEL_PATHS["text"] = text_model_path - self.config.LOCAL_MODEL_PATHS["coarse"] = coarse_model_path - self.config.LOCAL_MODEL_PATHS["fine"] = fine_model_path - self.config.LOCAL_MODEL_PATHS["hubert"] = hubert_model_path - self.config.LOCAL_MODEL_PATHS["hubert_tokenizer"] = hubert_tokenizer_path - - self.load_bark_models() - - if eval: - self.eval() diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/FlowControl.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/FlowControl.py deleted file mode 100644 index df04471f90ea33de242056d7e0cf6d6000765fae..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/FlowControl.py +++ /dev/null @@ -1,1325 +0,0 @@ -from __future__ import absolute_import - -import cython -cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object, - Builtin=object, InternalError=object, error=object, warning=object, - py_object_type=object, unspecified_type=object, - object_expr=object, fake_rhs_expr=object, TypedExprNode=object) - -from . import Builtin -from . import ExprNodes -from . import Nodes -from . import Options -from .PyrexTypes import py_object_type, unspecified_type -from . import PyrexTypes - -from .Visitor import TreeVisitor, CythonTransform -from .Errors import error, warning, InternalError -from .Optimize import ConstantFolding - - -class TypedExprNode(ExprNodes.ExprNode): - # Used for declaring assignments of a specified type without a known entry. - def __init__(self, type, may_be_none=None, pos=None): - super(TypedExprNode, self).__init__(pos) - self.type = type - self._may_be_none = may_be_none - - def may_be_none(self): - return self._may_be_none != False - -object_expr = TypedExprNode(py_object_type, may_be_none=True) -# Fake rhs to silence "unused variable" warning -fake_rhs_expr = TypedExprNode(unspecified_type) - - -class ControlBlock(object): - """Control flow graph node. Sequence of assignments and name references. 
- - children set of children nodes - parents set of parent nodes - positions set of position markers - - stats list of block statements - gen dict of assignments generated by this block - bounded set of entries that are definitely bounded in this block - - Example: - - a = 1 - b = a + c # 'c' is already bounded or exception here - - stats = [Assignment(a), NameReference(a), NameReference(c), - Assignment(b)] - gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)} - bounded = set([Entry(a), Entry(c)]) - - """ - - def __init__(self): - self.children = set() - self.parents = set() - self.positions = set() - - self.stats = [] - self.gen = {} - self.bounded = set() - - self.i_input = 0 - self.i_output = 0 - self.i_gen = 0 - self.i_kill = 0 - self.i_state = 0 - - def empty(self): - return (not self.stats and not self.positions) - - def detach(self): - """Detach block from parents and children.""" - for child in self.children: - child.parents.remove(self) - for parent in self.parents: - parent.children.remove(self) - self.parents.clear() - self.children.clear() - - def add_child(self, block): - self.children.add(block) - block.parents.add(self) - - -class ExitBlock(ControlBlock): - """Non-empty exit point block.""" - - def empty(self): - return False - - -class AssignmentList(object): - def __init__(self): - self.stats = [] - - -class ControlFlow(object): - """Control-flow graph. - - entry_point ControlBlock entry point for this graph - exit_point ControlBlock normal exit point - block ControlBlock current block - blocks set children nodes - entries set tracked entries - loops list stack for loop descriptors - exceptions list stack for exception descriptors - """ - - def __init__(self): - self.blocks = set() - self.entries = set() - self.loops = [] - self.exceptions = [] - - self.entry_point = ControlBlock() - self.exit_point = ExitBlock() - self.blocks.add(self.exit_point) - self.block = self.entry_point - - def newblock(self, parent=None): - """Create floating block linked to `parent` if given. - - NOTE: Block is NOT added to self.blocks - """ - block = ControlBlock() - self.blocks.add(block) - if parent: - parent.add_child(block) - return block - - def nextblock(self, parent=None): - """Create block children block linked to current or `parent` if given. 
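-        Unlike `newblock()`, the new block also becomes the current `self.block`.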
- - NOTE: Block is added to self.blocks - """ - block = ControlBlock() - self.blocks.add(block) - if parent: - parent.add_child(block) - elif self.block: - self.block.add_child(block) - self.block = block - return self.block - - def is_tracked(self, entry): - if entry.is_anonymous: - return False - return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or - entry.from_closure or entry.in_closure or - entry.error_on_uninitialized) - - def is_statically_assigned(self, entry): - if (entry.is_local and entry.is_variable and - (entry.type.is_struct_or_union or - entry.type.is_complex or - entry.type.is_array or - entry.type.is_cpp_class)): - # stack allocated structured variable => never uninitialised - return True - return False - - def mark_position(self, node): - """Mark position, will be used to draw graph nodes.""" - if self.block: - self.block.positions.add(node.pos[:2]) - - def mark_assignment(self, lhs, rhs, entry): - if self.block and self.is_tracked(entry): - assignment = NameAssignment(lhs, rhs, entry) - self.block.stats.append(assignment) - self.block.gen[entry] = assignment - self.entries.add(entry) - - def mark_argument(self, lhs, rhs, entry): - if self.block and self.is_tracked(entry): - assignment = Argument(lhs, rhs, entry) - self.block.stats.append(assignment) - self.block.gen[entry] = assignment - self.entries.add(entry) - - def mark_deletion(self, node, entry): - if self.block and self.is_tracked(entry): - assignment = NameDeletion(node, entry) - self.block.stats.append(assignment) - self.block.gen[entry] = Uninitialized - self.entries.add(entry) - - def mark_reference(self, node, entry): - if self.block and self.is_tracked(entry): - self.block.stats.append(NameReference(node, entry)) - ## XXX: We don't track expression evaluation order so we can't use - ## XXX: successful reference as initialization sign. 
- ## # Local variable is definitely bound after this reference - ## if not node.allow_null: - ## self.block.bounded.add(entry) - self.entries.add(entry) - - def normalize(self): - """Delete unreachable and orphan blocks.""" - queue = set([self.entry_point]) - visited = set() - while queue: - root = queue.pop() - visited.add(root) - for child in root.children: - if child not in visited: - queue.add(child) - unreachable = self.blocks - visited - for block in unreachable: - block.detach() - visited.remove(self.entry_point) - for block in visited: - if block.empty(): - for parent in block.parents: # Re-parent - for child in block.children: - parent.add_child(child) - block.detach() - unreachable.add(block) - self.blocks -= unreachable - - def initialize(self): - """Set initial state, map assignments to bits.""" - self.assmts = {} - - bit = 1 - for entry in self.entries: - assmts = AssignmentList() - assmts.mask = assmts.bit = bit - self.assmts[entry] = assmts - bit <<= 1 - - for block in self.blocks: - for stat in block.stats: - if isinstance(stat, NameAssignment): - stat.bit = bit - assmts = self.assmts[stat.entry] - assmts.stats.append(stat) - assmts.mask |= bit - bit <<= 1 - - for block in self.blocks: - for entry, stat in block.gen.items(): - assmts = self.assmts[entry] - if stat is Uninitialized: - block.i_gen |= assmts.bit - else: - block.i_gen |= stat.bit - block.i_kill |= assmts.mask - block.i_output = block.i_gen - for entry in block.bounded: - block.i_kill |= self.assmts[entry].bit - - for assmts in self.assmts.values(): - self.entry_point.i_gen |= assmts.bit - self.entry_point.i_output = self.entry_point.i_gen - - def map_one(self, istate, entry): - ret = set() - assmts = self.assmts[entry] - if istate & assmts.bit: - if self.is_statically_assigned(entry): - ret.add(StaticAssignment(entry)) - elif entry.from_closure: - ret.add(Unknown) - else: - ret.add(Uninitialized) - for assmt in assmts.stats: - if istate & assmt.bit: - ret.add(assmt) - return ret - - def reaching_definitions(self): - """Per-block reaching definitions analysis.""" - dirty = True - while dirty: - dirty = False - for block in self.blocks: - i_input = 0 - for parent in block.parents: - i_input |= parent.i_output - i_output = (i_input & ~block.i_kill) | block.i_gen - if i_output != block.i_output: - dirty = True - block.i_input = i_input - block.i_output = i_output - - -class LoopDescr(object): - def __init__(self, next_block, loop_block): - self.next_block = next_block - self.loop_block = loop_block - self.exceptions = [] - - -class ExceptionDescr(object): - """Exception handling helper. 
- - entry_point ControlBlock Exception handling entry point - finally_enter ControlBlock Normal finally clause entry point - finally_exit ControlBlock Normal finally clause exit point - """ - - def __init__(self, entry_point, finally_enter=None, finally_exit=None): - self.entry_point = entry_point - self.finally_enter = finally_enter - self.finally_exit = finally_exit - - -class NameAssignment(object): - def __init__(self, lhs, rhs, entry): - if lhs.cf_state is None: - lhs.cf_state = set() - self.lhs = lhs - self.rhs = rhs - self.entry = entry - self.pos = lhs.pos - self.refs = set() - self.is_arg = False - self.is_deletion = False - self.inferred_type = None - - def __repr__(self): - return '%s(entry=%r)' % (self.__class__.__name__, self.entry) - - def infer_type(self): - self.inferred_type = self.rhs.infer_type(self.entry.scope) - return self.inferred_type - - def type_dependencies(self): - return self.rhs.type_dependencies(self.entry.scope) - - @property - def type(self): - if not self.entry.type.is_unspecified: - return self.entry.type - return self.inferred_type - - -class StaticAssignment(NameAssignment): - """Initialised at declaration time, e.g. stack allocation.""" - def __init__(self, entry): - if not entry.type.is_pyobject: - may_be_none = False - else: - may_be_none = None # unknown - lhs = TypedExprNode( - entry.type, may_be_none=may_be_none, pos=entry.pos) - super(StaticAssignment, self).__init__(lhs, lhs, entry) - - def infer_type(self): - return self.entry.type - - def type_dependencies(self): - return () - - -class Argument(NameAssignment): - def __init__(self, lhs, rhs, entry): - NameAssignment.__init__(self, lhs, rhs, entry) - self.is_arg = True - - -class NameDeletion(NameAssignment): - def __init__(self, lhs, entry): - NameAssignment.__init__(self, lhs, lhs, entry) - self.is_deletion = True - - def infer_type(self): - inferred_type = self.rhs.infer_type(self.entry.scope) - if (not inferred_type.is_pyobject and - inferred_type.can_coerce_to_pyobject(self.entry.scope)): - return py_object_type - self.inferred_type = inferred_type - return inferred_type - - -class Uninitialized(object): - """Definitely not initialised yet.""" - - -class Unknown(object): - """Coming from outer closure, might be initialised or not.""" - - -class NameReference(object): - def __init__(self, node, entry): - if node.cf_state is None: - node.cf_state = set() - self.node = node - self.entry = entry - self.pos = node.pos - - def __repr__(self): - return '%s(entry=%r)' % (self.__class__.__name__, self.entry) - - -class ControlFlowState(list): - # Keeps track of Node's entry assignments - # - # cf_is_null [boolean] It is uninitialized - # cf_maybe_null [boolean] May be uninitialized - # is_single [boolean] Has only one assignment at this point - - cf_maybe_null = False - cf_is_null = False - is_single = False - - def __init__(self, state): - if Uninitialized in state: - state.discard(Uninitialized) - self.cf_maybe_null = True - if not state: - self.cf_is_null = True - elif Unknown in state: - state.discard(Unknown) - self.cf_maybe_null = True - else: - if len(state) == 1: - self.is_single = True - # XXX: Remove fake_rhs_expr - super(ControlFlowState, self).__init__( - [i for i in state if i.rhs is not fake_rhs_expr]) - - def one(self): - return self[0] - - -class GVContext(object): - """Graphviz subgraph object.""" - - def __init__(self): - self.blockids = {} - self.nextid = 0 - self.children = [] - self.sources = {} - - def add(self, child): - self.children.append(child) - - def nodeid(self, 
block): - if block not in self.blockids: - self.blockids[block] = 'block%d' % self.nextid - self.nextid += 1 - return self.blockids[block] - - def extract_sources(self, block): - if not block.positions: - return '' - start = min(block.positions) - stop = max(block.positions) - srcdescr = start[0] - if not srcdescr in self.sources: - self.sources[srcdescr] = list(srcdescr.get_lines()) - lines = self.sources[srcdescr] - return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]]) - - def render(self, fp, name, annotate_defs=False): - """Render graphviz dot graph""" - fp.write('digraph %s {\n' % name) - fp.write(' node [shape=box];\n') - for child in self.children: - child.render(fp, self, annotate_defs) - fp.write('}\n') - - def escape(self, text): - return text.replace('"', '\\"').replace('\n', '\\n') - - -class GV(object): - """Graphviz DOT renderer.""" - - def __init__(self, name, flow): - self.name = name - self.flow = flow - - def render(self, fp, ctx, annotate_defs=False): - fp.write(' subgraph %s {\n' % self.name) - for block in self.flow.blocks: - label = ctx.extract_sources(block) - if annotate_defs: - for stat in block.stats: - if isinstance(stat, NameAssignment): - label += '\n %s [%s %s]' % ( - stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1]) - elif isinstance(stat, NameReference): - if stat.entry: - label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1]) - if not label: - label = 'empty' - pid = ctx.nodeid(block) - fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label))) - for block in self.flow.blocks: - pid = ctx.nodeid(block) - for child in block.children: - fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child))) - fp.write(' }\n') - - -class MessageCollection(object): - """Collect error/warnings messages first then sort""" - def __init__(self): - self.messages = set() - - def error(self, pos, message): - self.messages.add((pos, True, message)) - - def warning(self, pos, message): - self.messages.add((pos, False, message)) - - def report(self): - for pos, is_error, message in sorted(self.messages): - if is_error: - error(pos, message) - else: - warning(pos, message, 2) - - -def check_definitions(flow, compiler_directives): - flow.initialize() - flow.reaching_definitions() - - # Track down state - assignments = set() - # Node to entry map - references = {} - assmt_nodes = set() - - for block in flow.blocks: - i_state = block.i_input - for stat in block.stats: - i_assmts = flow.assmts[stat.entry] - state = flow.map_one(i_state, stat.entry) - if isinstance(stat, NameAssignment): - stat.lhs.cf_state.update(state) - assmt_nodes.add(stat.lhs) - i_state = i_state & ~i_assmts.mask - if stat.is_deletion: - i_state |= i_assmts.bit - else: - i_state |= stat.bit - assignments.add(stat) - if stat.rhs is not fake_rhs_expr: - stat.entry.cf_assignments.append(stat) - elif isinstance(stat, NameReference): - references[stat.node] = stat.entry - stat.entry.cf_references.append(stat) - stat.node.cf_state.update(state) - ## if not stat.node.allow_null: - ## i_state &= ~i_assmts.bit - ## # after successful read, the state is known to be initialised - state.discard(Uninitialized) - state.discard(Unknown) - for assmt in state: - assmt.refs.add(stat) - - # Check variable usage - warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized'] - warn_unused_result = compiler_directives['warn.unused_result'] - warn_unused = compiler_directives['warn.unused'] - warn_unused_arg = compiler_directives['warn.unused_arg'] - - messages = 
MessageCollection() - - # assignment hints - for node in assmt_nodes: - if Uninitialized in node.cf_state: - node.cf_maybe_null = True - if len(node.cf_state) == 1: - node.cf_is_null = True - else: - node.cf_is_null = False - elif Unknown in node.cf_state: - node.cf_maybe_null = True - else: - node.cf_is_null = False - node.cf_maybe_null = False - - # Find uninitialized references and cf-hints - for node, entry in references.items(): - if Uninitialized in node.cf_state: - node.cf_maybe_null = True - if not entry.from_closure and len(node.cf_state) == 1: - node.cf_is_null = True - if (node.allow_null or entry.from_closure - or entry.is_pyclass_attr or entry.type.is_error): - pass # Can be uninitialized here - elif node.cf_is_null: - if entry.error_on_uninitialized or ( - Options.error_on_uninitialized and ( - entry.type.is_pyobject or entry.type.is_unspecified)): - messages.error( - node.pos, - "local variable '%s' referenced before assignment" - % entry.name) - else: - messages.warning( - node.pos, - "local variable '%s' referenced before assignment" - % entry.name) - elif warn_maybe_uninitialized: - messages.warning( - node.pos, - "local variable '%s' might be referenced before assignment" - % entry.name) - elif Unknown in node.cf_state: - # TODO: better cross-closure analysis to know when inner functions - # are being called before a variable is being set, and when - # a variable is known to be set before even defining the - # inner function, etc. - node.cf_maybe_null = True - else: - node.cf_is_null = False - node.cf_maybe_null = False - - # Unused result - for assmt in assignments: - if (not assmt.refs and not assmt.entry.is_pyclass_attr - and not assmt.entry.in_closure): - if assmt.entry.cf_references and warn_unused_result: - if assmt.is_arg: - messages.warning(assmt.pos, "Unused argument value '%s'" % - assmt.entry.name) - else: - messages.warning(assmt.pos, "Unused result in '%s'" % - assmt.entry.name) - assmt.lhs.cf_used = False - - # Unused entries - for entry in flow.entries: - if (not entry.cf_references - and not entry.is_pyclass_attr): - if entry.name != '_' and not entry.name.startswith('unused'): - # '_' is often used for unused variables, e.g. 
in loops - if entry.is_arg: - if warn_unused_arg: - messages.warning(entry.pos, "Unused argument '%s'" % - entry.name) - else: - if warn_unused: - messages.warning(entry.pos, "Unused entry '%s'" % - entry.name) - entry.cf_used = False - - messages.report() - - for node in assmt_nodes: - node.cf_state = ControlFlowState(node.cf_state) - for node in references: - node.cf_state = ControlFlowState(node.cf_state) - - -class AssignmentCollector(TreeVisitor): - def __init__(self): - super(AssignmentCollector, self).__init__() - self.assignments = [] - - def visit_Node(self): - self._visitchildren(self, None) - - def visit_SingleAssignmentNode(self, node): - self.assignments.append((node.lhs, node.rhs)) - - def visit_CascadedAssignmentNode(self, node): - for lhs in node.lhs_list: - self.assignments.append((lhs, node.rhs)) - - -class ControlFlowAnalysis(CythonTransform): - - def visit_ModuleNode(self, node): - self.gv_ctx = GVContext() - self.constant_folder = ConstantFolding() - - # Set of NameNode reductions - self.reductions = set() - - self.in_inplace_assignment = False - self.env_stack = [] - self.env = node.scope - self.stack = [] - self.flow = ControlFlow() - self.visitchildren(node) - - check_definitions(self.flow, self.current_directives) - - dot_output = self.current_directives['control_flow.dot_output'] - if dot_output: - annotate_defs = self.current_directives['control_flow.dot_annotate_defs'] - fp = open(dot_output, 'wt') - try: - self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs) - finally: - fp.close() - return node - - def visit_FuncDefNode(self, node): - for arg in node.args: - if arg.default: - self.visitchildren(arg) - self.visitchildren(node, ('decorators',)) - self.env_stack.append(self.env) - self.env = node.local_scope - self.stack.append(self.flow) - self.flow = ControlFlow() - - # Collect all entries - for entry in node.local_scope.entries.values(): - if self.flow.is_tracked(entry): - self.flow.entries.add(entry) - - self.mark_position(node) - # Function body block - self.flow.nextblock() - - for arg in node.args: - self._visit(arg) - if node.star_arg: - self.flow.mark_argument(node.star_arg, - TypedExprNode(Builtin.tuple_type, - may_be_none=False), - node.star_arg.entry) - if node.starstar_arg: - self.flow.mark_argument(node.starstar_arg, - TypedExprNode(Builtin.dict_type, - may_be_none=False), - node.starstar_arg.entry) - self._visit(node.body) - # Workaround for generators - if node.is_generator: - self._visit(node.gbody.body) - - # Exit point - if self.flow.block: - self.flow.block.add_child(self.flow.exit_point) - - # Cleanup graph - self.flow.normalize() - check_definitions(self.flow, self.current_directives) - self.flow.blocks.add(self.flow.entry_point) - - self.gv_ctx.add(GV(node.local_scope.name, self.flow)) - - self.flow = self.stack.pop() - self.env = self.env_stack.pop() - return node - - def visit_DefNode(self, node): - node.used = True - return self.visit_FuncDefNode(node) - - def visit_GeneratorBodyDefNode(self, node): - return node - - def visit_CTypeDefNode(self, node): - return node - - def mark_assignment(self, lhs, rhs=None): - if not self.flow.block: - return - if self.flow.exceptions: - exc_descr = self.flow.exceptions[-1] - self.flow.block.add_child(exc_descr.entry_point) - self.flow.nextblock() - - if not rhs: - rhs = object_expr - if lhs.is_name: - if lhs.entry is not None: - entry = lhs.entry - else: - entry = self.env.lookup(lhs.name) - if entry is None: # TODO: This shouldn't happen... 
- return - self.flow.mark_assignment(lhs, rhs, entry) - elif lhs.is_sequence_constructor: - for i, arg in enumerate(lhs.args): - if not rhs or arg.is_starred: - item_node = None - else: - item_node = rhs.inferable_item_node(i) - self.mark_assignment(arg, item_node) - else: - self._visit(lhs) - - if self.flow.exceptions: - exc_descr = self.flow.exceptions[-1] - self.flow.block.add_child(exc_descr.entry_point) - self.flow.nextblock() - - def mark_position(self, node): - """Mark position if DOT output is enabled.""" - if self.current_directives['control_flow.dot_output']: - self.flow.mark_position(node) - - def visit_FromImportStatNode(self, node): - for name, target in node.items: - if name != "*": - self.mark_assignment(target) - self.visitchildren(node) - return node - - def visit_AssignmentNode(self, node): - raise InternalError("Unhandled assignment node") - - def visit_SingleAssignmentNode(self, node): - self._visit(node.rhs) - self.mark_assignment(node.lhs, node.rhs) - return node - - def visit_CascadedAssignmentNode(self, node): - self._visit(node.rhs) - for lhs in node.lhs_list: - self.mark_assignment(lhs, node.rhs) - return node - - def visit_ParallelAssignmentNode(self, node): - collector = AssignmentCollector() - collector.visitchildren(node) - for lhs, rhs in collector.assignments: - self._visit(rhs) - for lhs, rhs in collector.assignments: - self.mark_assignment(lhs, rhs) - return node - - def visit_InPlaceAssignmentNode(self, node): - self.in_inplace_assignment = True - self.visitchildren(node) - self.in_inplace_assignment = False - self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node())) - return node - - def visit_DelStatNode(self, node): - for arg in node.args: - if arg.is_name: - entry = arg.entry or self.env.lookup(arg.name) - if entry.in_closure or entry.from_closure: - error(arg.pos, - "can not delete variable '%s' " - "referenced in nested scope" % entry.name) - if not node.ignore_nonexisting: - self._visit(arg) # mark reference - self.flow.mark_deletion(arg, entry) - else: - self._visit(arg) - return node - - def visit_CArgDeclNode(self, node): - entry = self.env.lookup(node.name) - if entry: - may_be_none = not node.not_none - self.flow.mark_argument( - node, TypedExprNode(entry.type, may_be_none), entry) - return node - - def visit_NameNode(self, node): - if self.flow.block: - entry = node.entry or self.env.lookup(node.name) - if entry: - self.flow.mark_reference(node, entry) - - if entry in self.reductions and not self.in_inplace_assignment: - error(node.pos, - "Cannot read reduction variable in loop body") - - return node - - def visit_StatListNode(self, node): - if self.flow.block: - for stat in node.stats: - self._visit(stat) - if not self.flow.block: - stat.is_terminator = True - break - return node - - def visit_Node(self, node): - self.visitchildren(node) - self.mark_position(node) - return node - - def visit_SizeofVarNode(self, node): - return node - - def visit_TypeidNode(self, node): - return node - - def visit_IfStatNode(self, node): - next_block = self.flow.newblock() - parent = self.flow.block - # If clauses - for clause in node.if_clauses: - parent = self.flow.nextblock(parent) - self._visit(clause.condition) - self.flow.nextblock() - self._visit(clause.body) - if self.flow.block: - self.flow.block.add_child(next_block) - # Else clause - if node.else_clause: - self.flow.nextblock(parent=parent) - self._visit(node.else_clause) - if self.flow.block: - self.flow.block.add_child(next_block) - else: - parent.add_child(next_block) - - if 
next_block.parents: - self.flow.block = next_block - else: - self.flow.block = None - return node - - def visit_WhileStatNode(self, node): - condition_block = self.flow.nextblock() - next_block = self.flow.newblock() - # Condition block - self.flow.loops.append(LoopDescr(next_block, condition_block)) - if node.condition: - self._visit(node.condition) - # Body block - self.flow.nextblock() - self._visit(node.body) - self.flow.loops.pop() - # Loop it - if self.flow.block: - self.flow.block.add_child(condition_block) - self.flow.block.add_child(next_block) - # Else clause - if node.else_clause: - self.flow.nextblock(parent=condition_block) - self._visit(node.else_clause) - if self.flow.block: - self.flow.block.add_child(next_block) - else: - condition_block.add_child(next_block) - - if next_block.parents: - self.flow.block = next_block - else: - self.flow.block = None - return node - - def mark_forloop_target(self, node): - # TODO: Remove redundancy with range optimization... - is_special = False - sequence = node.iterator.sequence - target = node.target - if isinstance(sequence, ExprNodes.SimpleCallNode): - function = sequence.function - if sequence.self is None and function.is_name: - entry = self.env.lookup(function.name) - if not entry or entry.is_builtin: - if function.name == 'reversed' and len(sequence.args) == 1: - sequence = sequence.args[0] - elif function.name == 'enumerate' and len(sequence.args) == 1: - if target.is_sequence_constructor and len(target.args) == 2: - iterator = sequence.args[0] - if iterator.is_name: - iterator_type = iterator.infer_type(self.env) - if iterator_type.is_builtin_type: - # assume that builtin types have a length within Py_ssize_t - self.mark_assignment( - target.args[0], - ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX', - type=PyrexTypes.c_py_ssize_t_type)) - target = target.args[1] - sequence = sequence.args[0] - if isinstance(sequence, ExprNodes.SimpleCallNode): - function = sequence.function - if sequence.self is None and function.is_name: - entry = self.env.lookup(function.name) - if not entry or entry.is_builtin: - if function.name in ('range', 'xrange'): - is_special = True - for arg in sequence.args[:2]: - self.mark_assignment(target, arg) - if len(sequence.args) > 2: - self.mark_assignment(target, self.constant_folder( - ExprNodes.binop_node(node.pos, - '+', - sequence.args[0], - sequence.args[2]))) - - if not is_special: - # A for-loop basically translates to subsequent calls to - # __getitem__(), so using an IndexNode here allows us to - # naturally infer the base type of pointers, C arrays, - # Python strings, etc., while correctly falling back to an - # object type when the base type cannot be handled. 
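-            # Illustrative sketch: a loop `for x in seq` is modelled as repeated
-            # assignments `x = seq[i]`, so `x` inherits the element type of `seq`
-            # whenever that element type can be inferred.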
- - self.mark_assignment(target, node.item) - - def visit_AsyncForStatNode(self, node): - return self.visit_ForInStatNode(node) - - def visit_ForInStatNode(self, node): - condition_block = self.flow.nextblock() - next_block = self.flow.newblock() - # Condition with iterator - self.flow.loops.append(LoopDescr(next_block, condition_block)) - self._visit(node.iterator) - # Target assignment - self.flow.nextblock() - - if isinstance(node, Nodes.ForInStatNode): - self.mark_forloop_target(node) - elif isinstance(node, Nodes.AsyncForStatNode): - # not entirely correct, but good enough for now - self.mark_assignment(node.target, node.item) - else: # Parallel - self.mark_assignment(node.target) - - # Body block - if isinstance(node, Nodes.ParallelRangeNode): - # In case of an invalid - self._delete_privates(node, exclude=node.target.entry) - - self.flow.nextblock() - self._visit(node.body) - self.flow.loops.pop() - - # Loop it - if self.flow.block: - self.flow.block.add_child(condition_block) - # Else clause - if node.else_clause: - self.flow.nextblock(parent=condition_block) - self._visit(node.else_clause) - if self.flow.block: - self.flow.block.add_child(next_block) - else: - condition_block.add_child(next_block) - - if next_block.parents: - self.flow.block = next_block - else: - self.flow.block = None - return node - - def _delete_privates(self, node, exclude=None): - for private_node in node.assigned_nodes: - if not exclude or private_node.entry is not exclude: - self.flow.mark_deletion(private_node, private_node.entry) - - def visit_ParallelRangeNode(self, node): - reductions = self.reductions - - # if node.target is None or not a NameNode, an error will have - # been previously issued - if hasattr(node.target, 'entry'): - self.reductions = set(reductions) - - for private_node in node.assigned_nodes: - private_node.entry.error_on_uninitialized = True - pos, reduction = node.assignments[private_node.entry] - if reduction: - self.reductions.add(private_node.entry) - - node = self.visit_ForInStatNode(node) - - self.reductions = reductions - return node - - def visit_ParallelWithBlockNode(self, node): - for private_node in node.assigned_nodes: - private_node.entry.error_on_uninitialized = True - - self._delete_privates(node) - self.visitchildren(node) - self._delete_privates(node) - - return node - - def visit_ForFromStatNode(self, node): - condition_block = self.flow.nextblock() - next_block = self.flow.newblock() - # Condition with iterator - self.flow.loops.append(LoopDescr(next_block, condition_block)) - self._visit(node.bound1) - self._visit(node.bound2) - if node.step is not None: - self._visit(node.step) - # Target assignment - self.flow.nextblock() - self.mark_assignment(node.target, node.bound1) - if node.step is not None: - self.mark_assignment(node.target, self.constant_folder( - ExprNodes.binop_node(node.pos, '+', node.bound1, node.step))) - # Body block - self.flow.nextblock() - self._visit(node.body) - self.flow.loops.pop() - # Loop it - if self.flow.block: - self.flow.block.add_child(condition_block) - # Else clause - if node.else_clause: - self.flow.nextblock(parent=condition_block) - self._visit(node.else_clause) - if self.flow.block: - self.flow.block.add_child(next_block) - else: - condition_block.add_child(next_block) - - if next_block.parents: - self.flow.block = next_block - else: - self.flow.block = None - return node - - def visit_LoopNode(self, node): - raise InternalError("Generic loops are not supported") - - def visit_WithTargetAssignmentStatNode(self, node): - 
self.mark_assignment(node.lhs, node.with_node.enter_call) - return node - - def visit_WithStatNode(self, node): - self._visit(node.manager) - self._visit(node.enter_call) - self._visit(node.body) - return node - - def visit_TryExceptStatNode(self, node): - # After exception handling - next_block = self.flow.newblock() - # Body block - self.flow.newblock() - # Exception entry point - entry_point = self.flow.newblock() - self.flow.exceptions.append(ExceptionDescr(entry_point)) - self.flow.nextblock() - ## XXX: links to exception handling point should be added by - ## XXX: children nodes - self.flow.block.add_child(entry_point) - self.flow.nextblock() - self._visit(node.body) - self.flow.exceptions.pop() - - # After exception - if self.flow.block: - if node.else_clause: - self.flow.nextblock() - self._visit(node.else_clause) - if self.flow.block: - self.flow.block.add_child(next_block) - - for clause in node.except_clauses: - self.flow.block = entry_point - if clause.pattern: - for pattern in clause.pattern: - self._visit(pattern) - else: - # TODO: handle * pattern - pass - entry_point = self.flow.newblock(parent=self.flow.block) - self.flow.nextblock() - if clause.target: - self.mark_assignment(clause.target) - self._visit(clause.body) - if self.flow.block: - self.flow.block.add_child(next_block) - - if self.flow.exceptions: - entry_point.add_child(self.flow.exceptions[-1].entry_point) - - if next_block.parents: - self.flow.block = next_block - else: - self.flow.block = None - return node - - def visit_TryFinallyStatNode(self, node): - body_block = self.flow.nextblock() - - # Exception entry point - entry_point = self.flow.newblock() - self.flow.block = entry_point - self._visit(node.finally_except_clause) - - if self.flow.block and self.flow.exceptions: - self.flow.block.add_child(self.flow.exceptions[-1].entry_point) - - # Normal execution - finally_enter = self.flow.newblock() - self.flow.block = finally_enter - self._visit(node.finally_clause) - finally_exit = self.flow.block - - descr = ExceptionDescr(entry_point, finally_enter, finally_exit) - self.flow.exceptions.append(descr) - if self.flow.loops: - self.flow.loops[-1].exceptions.append(descr) - self.flow.block = body_block - body_block.add_child(entry_point) - self.flow.nextblock() - self._visit(node.body) - self.flow.exceptions.pop() - if self.flow.loops: - self.flow.loops[-1].exceptions.pop() - - if self.flow.block: - self.flow.block.add_child(finally_enter) - if finally_exit: - self.flow.block = self.flow.nextblock(parent=finally_exit) - else: - self.flow.block = None - return node - - def visit_RaiseStatNode(self, node): - self.mark_position(node) - self.visitchildren(node) - if self.flow.exceptions: - self.flow.block.add_child(self.flow.exceptions[-1].entry_point) - self.flow.block = None - return node - - def visit_ReraiseStatNode(self, node): - self.mark_position(node) - if self.flow.exceptions: - self.flow.block.add_child(self.flow.exceptions[-1].entry_point) - self.flow.block = None - return node - - def visit_ReturnStatNode(self, node): - self.mark_position(node) - self.visitchildren(node) - - outer_exception_handlers = iter(self.flow.exceptions[::-1]) - for handler in outer_exception_handlers: - if handler.finally_enter: - self.flow.block.add_child(handler.finally_enter) - if handler.finally_exit: - # 'return' goes to function exit, or to the next outer 'finally' clause - exit_point = self.flow.exit_point - for next_handler in outer_exception_handlers: - if next_handler.finally_enter: - exit_point = 
next_handler.finally_enter - break - handler.finally_exit.add_child(exit_point) - break - else: - if self.flow.block: - self.flow.block.add_child(self.flow.exit_point) - self.flow.block = None - return node - - def visit_BreakStatNode(self, node): - if not self.flow.loops: - #error(node.pos, "break statement not inside loop") - return node - loop = self.flow.loops[-1] - self.mark_position(node) - for exception in loop.exceptions[::-1]: - if exception.finally_enter: - self.flow.block.add_child(exception.finally_enter) - if exception.finally_exit: - exception.finally_exit.add_child(loop.next_block) - break - else: - self.flow.block.add_child(loop.next_block) - self.flow.block = None - return node - - def visit_ContinueStatNode(self, node): - if not self.flow.loops: - #error(node.pos, "continue statement not inside loop") - return node - loop = self.flow.loops[-1] - self.mark_position(node) - for exception in loop.exceptions[::-1]: - if exception.finally_enter: - self.flow.block.add_child(exception.finally_enter) - if exception.finally_exit: - exception.finally_exit.add_child(loop.loop_block) - break - else: - self.flow.block.add_child(loop.loop_block) - self.flow.block = None - return node - - def visit_ComprehensionNode(self, node): - if node.expr_scope: - self.env_stack.append(self.env) - self.env = node.expr_scope - # Skip append node here - self._visit(node.loop) - if node.expr_scope: - self.env = self.env_stack.pop() - return node - - def visit_ScopedExprNode(self, node): - if node.expr_scope: - self.env_stack.append(self.env) - self.env = node.expr_scope - self.visitchildren(node) - if node.expr_scope: - self.env = self.env_stack.pop() - return node - - def visit_PyClassDefNode(self, node): - self.visitchildren(node, ('dict', 'metaclass', - 'mkw', 'bases', 'class_result')) - self.flow.mark_assignment(node.target, node.classobj, - self.env.lookup(node.name)) - self.env_stack.append(self.env) - self.env = node.scope - self.flow.nextblock() - self.visitchildren(node, ('body',)) - self.flow.nextblock() - self.env = self.env_stack.pop() - return node - - def visit_AmpersandNode(self, node): - if node.operand.is_name: - # Fake assignment to silence warning - self.mark_assignment(node.operand, fake_rhs_expr) - self.visitchildren(node) - return node diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Pythran.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Pythran.py deleted file mode 100644 index c02704a918ce6cb4e83ef28b78f678a748a022c3..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Pythran.py +++ /dev/null @@ -1,227 +0,0 @@ -# cython: language_level=3 - -from __future__ import absolute_import - -from .PyrexTypes import CType, CTypedefType, CStructOrUnionType - -import cython - -try: - import pythran - pythran_is_pre_0_9 = tuple(map(int, pythran.__version__.split('.')[0:2])) < (0, 9) - pythran_is_pre_0_9_6 = tuple(map(int, pythran.__version__.split('.')[0:3])) < (0, 9, 6) -except ImportError: - pythran = None - pythran_is_pre_0_9 = True - pythran_is_pre_0_9_6 = True - -if pythran_is_pre_0_9_6: - pythran_builtins = '__builtin__' -else: - pythran_builtins = 'builtins' - - -# Pythran/Numpy specific operations - -def has_np_pythran(env): - if env is None: - return False - directives = getattr(env, 'directives', None) - return (directives and directives.get('np_pythran', False)) - -@cython.ccall -def is_pythran_supported_dtype(type_): - if isinstance(type_, 
CTypedefType): - return is_pythran_supported_type(type_.typedef_base_type) - return type_.is_numeric - - -def pythran_type(Ty, ptype="ndarray"): - if Ty.is_buffer: - ndim,dtype = Ty.ndim, Ty.dtype - if isinstance(dtype, CStructOrUnionType): - ctype = dtype.cname - elif isinstance(dtype, CType): - ctype = dtype.sign_and_name() - elif isinstance(dtype, CTypedefType): - ctype = dtype.typedef_cname - else: - raise ValueError("unsupported type %s!" % dtype) - if pythran_is_pre_0_9: - return "pythonic::types::%s<%s,%d>" % (ptype,ctype, ndim) - else: - return "pythonic::types::%s<%s,pythonic::types::pshape<%s>>" % (ptype,ctype, ",".join(("long",)*ndim)) - if Ty.is_pythran_expr: - return Ty.pythran_type - #if Ty.is_none: - # return "decltype(pythonic::builtins::None)" - if Ty.is_numeric: - return Ty.sign_and_name() - raise ValueError("unsupported pythran type %s (%s)" % (Ty, type(Ty))) - - -@cython.cfunc -def type_remove_ref(ty): - return "typename std::remove_reference<%s>::type" % ty - - -def pythran_binop_type(op, tA, tB): - if op == '**': - return 'decltype(pythonic::numpy::functor::power{}(std::declval<%s>(), std::declval<%s>()))' % ( - pythran_type(tA), pythran_type(tB)) - else: - return "decltype(std::declval<%s>() %s std::declval<%s>())" % ( - pythran_type(tA), op, pythran_type(tB)) - - -def pythran_unaryop_type(op, type_): - return "decltype(%sstd::declval<%s>())" % ( - op, pythran_type(type_)) - - -@cython.cfunc -def _index_access(index_code, indices): - indexing = ",".join([index_code(idx) for idx in indices]) - return ('[%s]' if len(indices) == 1 else '(%s)') % indexing - - -def _index_type_code(index_with_type): - idx, index_type = index_with_type - if idx.is_slice: - n = 2 + int(not idx.step.is_none) - return "pythonic::%s::functor::slice{}(%s)" % ( - pythran_builtins, - ",".join(["0"]*n)) - elif index_type.is_int: - return "std::declval<%s>()" % index_type.sign_and_name() - elif index_type.is_pythran_expr: - return "std::declval<%s>()" % index_type.pythran_type - raise ValueError("unsupported indexing type %s!" 
-                     % index_type)
-
-
-def _index_code(idx):
-    if idx.is_slice:
-        values = idx.start, idx.stop, idx.step
-        if idx.step.is_none:
-            func = "contiguous_slice"
-            values = values[:2]
-        else:
-            func = "slice"
-        return "pythonic::types::%s(%s)" % (
-            func, ",".join((v.pythran_result() for v in values)))
-    elif idx.type.is_int:
-        return to_pythran(idx)
-    elif idx.type.is_pythran_expr:
-        return idx.pythran_result()
-    raise ValueError("unsupported indexing type %s" % idx.type)
-
-
-def pythran_indexing_type(type_, indices):
-    return type_remove_ref("decltype(std::declval<%s>()%s)" % (
-        pythran_type(type_),
-        _index_access(_index_type_code, indices),
-    ))
-
-
-def pythran_indexing_code(indices):
-    return _index_access(_index_code, indices)
-
-
-def np_func_to_list(func):
-    if not func.is_numpy_attribute:
-        return []
-    return np_func_to_list(func.obj) + [func.attribute]
-
-
-if pythran is None:
-    def pythran_is_numpy_func_supported(name):
-        return False
-else:
-    def pythran_is_numpy_func_supported(func):
-        CurF = pythran.tables.MODULES['numpy']
-        FL = np_func_to_list(func)
-        for F in FL:
-            CurF = CurF.get(F, None)
-            if CurF is None:
-                return False
-        return True
-
-
-def pythran_functor(func):
-    func = np_func_to_list(func)
-    submodules = "::".join(func[:-1] + ["functor"])
-    return "pythonic::numpy::%s::%s" % (submodules, func[-1])
-
-
-def pythran_func_type(func, args):
-    args = ",".join(("std::declval<%s>()" % pythran_type(a.type) for a in args))
-    return "decltype(%s{}(%s))" % (pythran_functor(func), args)
-
-
-@cython.ccall
-def to_pythran(op, ptype=None):
-    op_type = op.type
-    if op_type.is_int:
-        # Make sure that integer literals always have exactly the type that the templates expect.
-        return op_type.cast_code(op.result())
-    if is_type(op_type, ["is_pythran_expr", "is_numeric", "is_float", "is_complex"]):
-        return op.result()
-    if op.is_none:
-        return "pythonic::%s::None" % pythran_builtins
-    if ptype is None:
-        ptype = pythran_type(op_type)
-
-    assert op.type.is_pyobject
-    return "from_python<%s>(%s)" % (ptype, op.py_result())
-
-
-@cython.cfunc
-def is_type(type_, types):
-    for attr in types:
-        if getattr(type_, attr, False):
-            return True
-    return False
-
-
-def is_pythran_supported_node_or_none(node):
-    return node.is_none or is_pythran_supported_type(node.type)
-
-
-@cython.ccall
-def is_pythran_supported_type(type_):
-    pythran_supported = (
-        "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_none", "is_complex")
-    return is_type(type_, pythran_supported) or is_pythran_expr(type_)
-
-
-def is_pythran_supported_operation_type(type_):
-    pythran_supported = (
-        "is_pythran_expr", "is_int", "is_numeric", "is_float", "is_complex")
-    return is_type(type_, pythran_supported) or is_pythran_expr(type_)
-
-
-@cython.ccall
-def is_pythran_expr(type_):
-    return type_.is_pythran_expr
-
-
-def is_pythran_buffer(type_):
-    return (type_.is_numpy_buffer and is_pythran_supported_dtype(type_.dtype) and
-            type_.mode in ("c", "strided") and not type_.cast)
-
-
-def pythran_get_func_include_file(func):
-    func = np_func_to_list(func)
-    return "pythonic/numpy/%s.hpp" % "/".join(func)
-
-
-def include_pythran_generic(env):
-    # Generic files
-    env.add_include_file("pythonic/core.hpp")
-    env.add_include_file("pythonic/python/core.hpp")
-    env.add_include_file("pythonic/types/bool.hpp")
-    env.add_include_file("pythonic/types/ndarray.hpp")
-    env.add_include_file("pythonic/numpy/power.hpp")
-    env.add_include_file("pythonic/%s/slice.hpp" % pythran_builtins)
-    env.add_include_file("<new>")  # for placement new
-
-    for i in (8, 16, 32,
64): - env.add_include_file("pythonic/types/uint%d.hpp" % i) - env.add_include_file("pythonic/types/int%d.hpp" % i) - for t in ("float", "float32", "float64", "set", "slice", "tuple", "int", - "complex", "complex64", "complex128"): - env.add_include_file("pythonic/types/%s.hpp" % t) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/audioread/ffdec.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/audioread/ffdec.py deleted file mode 100644 index ad4f448884ceddcd2f5eb7ce12fdd08bb5af893c..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/audioread/ffdec.py +++ /dev/null @@ -1,327 +0,0 @@ -# This file is part of audioread. -# Copyright 2014, Adrian Sampson. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. - -"""Read audio data using the ffmpeg command line tool via its standard -output. -""" - -import sys -import subprocess -import re -import threading -import time -import os -try: - import queue -except ImportError: - import Queue as queue - -from .exceptions import DecodeError - -COMMANDS = ('ffmpeg', 'avconv') - -if sys.platform == "win32": - PROC_FLAGS = 0x08000000 -else: - PROC_FLAGS = 0 - - -class FFmpegError(DecodeError): - pass - - -class CommunicationError(FFmpegError): - """Raised when the output of FFmpeg is not parseable.""" - - -class UnsupportedError(FFmpegError): - """The file could not be decoded by FFmpeg.""" - - -class NotInstalledError(FFmpegError): - """Could not find the ffmpeg binary.""" - - -class ReadTimeoutError(FFmpegError): - """Reading from the ffmpeg command-line tool timed out.""" - - -class QueueReaderThread(threading.Thread): - """A thread that consumes data from a filehandle and sends the data - over a Queue. - """ - def __init__(self, fh, blocksize=1024, discard=False): - super(QueueReaderThread, self).__init__() - self.fh = fh - self.blocksize = blocksize - self.daemon = True - self.discard = discard - self.queue = None if discard else queue.Queue() - - def run(self): - while True: - data = self.fh.read(self.blocksize) - if not self.discard: - self.queue.put(data) - if not data: - # Stream closed (EOF). - break - - -def popen_multiple(commands, command_args, *args, **kwargs): - """Like `subprocess.Popen`, but can try multiple commands in case - some are not available. - - `commands` is an iterable of command names and `command_args` are - the rest of the arguments that, when appended to the command name, - make up the full first argument to `subprocess.Popen`. The - other positional and keyword arguments are passed through. - """ - for i, command in enumerate(commands): - cmd = [command] + command_args - try: - return subprocess.Popen(cmd, *args, **kwargs) - except OSError: - if i == len(commands) - 1: - # No more commands to try. - raise - - -def available(): - """Detect whether the FFmpeg backend can be used on this system. 
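-
-    This runs `ffmpeg -version` (falling back to `avconv`) and reports whether
-    the command exited successfully.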
- """ - try: - proc = popen_multiple( - COMMANDS, - ['-version'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - creationflags=PROC_FLAGS, - ) - except OSError: - return False - else: - proc.wait() - return proc.returncode == 0 - - -# For Windows error switch management, we need a lock to keep the mode -# adjustment atomic. -windows_error_mode_lock = threading.Lock() - - -class FFmpegAudioFile(object): - """An audio file decoded by the ffmpeg command-line utility.""" - def __init__(self, filename, block_size=4096): - # On Windows, we need to disable the subprocess's crash dialog - # in case it dies. Passing SEM_NOGPFAULTERRORBOX to SetErrorMode - # disables this behavior. - windows = sys.platform.startswith("win") - if windows: - windows_error_mode_lock.acquire() - SEM_NOGPFAULTERRORBOX = 0x0002 - import ctypes - # We call SetErrorMode in two steps to avoid overriding - # existing error mode. - previous_error_mode = \ - ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX) - ctypes.windll.kernel32.SetErrorMode( - previous_error_mode | SEM_NOGPFAULTERRORBOX - ) - - try: - self.devnull = open(os.devnull) - self.proc = popen_multiple( - COMMANDS, - ['-i', filename, '-f', 's16le', '-'], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - stdin=self.devnull, - creationflags=PROC_FLAGS, - ) - - except OSError: - raise NotInstalledError() - - finally: - # Reset previous error mode on Windows. (We can change this - # back now because the flag was inherited by the subprocess; - # we don't need to keep it set in the parent process.) - if windows: - try: - import ctypes - ctypes.windll.kernel32.SetErrorMode(previous_error_mode) - finally: - windows_error_mode_lock.release() - - # Start another thread to consume the standard output of the - # process, which contains raw audio data. - self.stdout_reader = QueueReaderThread(self.proc.stdout, block_size) - self.stdout_reader.start() - - # Read relevant information from stderr. - self._get_info() - - # Start a separate thread to read the rest of the data from - # stderr. This (a) avoids filling up the OS buffer and (b) - # collects the error output for diagnosis. - self.stderr_reader = QueueReaderThread(self.proc.stderr) - self.stderr_reader.start() - - def read_data(self, timeout=10.0): - """Read blocks of raw PCM data from the file.""" - # Read from stdout in a separate thread and consume data from - # the queue. - start_time = time.time() - while True: - # Wait for data to be available or a timeout. - data = None - try: - data = self.stdout_reader.queue.get(timeout=timeout) - if data: - yield data - else: - # End of file. - break - except queue.Empty: - # Queue read timed out. - end_time = time.time() - if not data: - if end_time - start_time >= timeout: - # Nothing interesting has happened for a while -- - # FFmpeg is probably hanging. - raise ReadTimeoutError('ffmpeg output: {}'.format( - b''.join(self.stderr_reader.queue.queue) - )) - else: - start_time = end_time - # Keep waiting. - continue - - def _get_info(self): - """Reads the tool's output from its stderr stream, extracts the - relevant information, and parses it. - """ - out_parts = [] - while True: - line = self.proc.stderr.readline() - if not line: - # EOF and data not found. - raise CommunicationError("stream info not found") - - # In Python 3, result of reading from stderr is bytes. 
- if isinstance(line, bytes): - line = line.decode('utf8', 'ignore') - - line = line.strip().lower() - - if 'no such file' in line: - raise IOError('file not found') - elif 'invalid data found' in line: - raise UnsupportedError() - elif 'duration:' in line: - out_parts.append(line) - elif 'audio:' in line: - out_parts.append(line) - self._parse_info(''.join(out_parts)) - break - - def _parse_info(self, s): - """Given relevant data from the ffmpeg output, set audio - parameter fields on this object. - """ - # Sample rate. - match = re.search(r'(\d+) hz', s) - if match: - self.samplerate = int(match.group(1)) - else: - self.samplerate = 0 - - # Channel count. - match = re.search(r'hz, ([^,]+),', s) - if match: - mode = match.group(1) - if mode == 'stereo': - self.channels = 2 - else: - cmatch = re.match(r'(\d+)\.?(\d)?', mode) - if cmatch: - self.channels = sum(map(int, cmatch.group().split('.'))) - else: - self.channels = 1 - else: - self.channels = 0 - - # Duration. - match = re.search( - r'duration: (\d+):(\d+):(\d+).(\d)', s - ) - if match: - durparts = list(map(int, match.groups())) - duration = ( - durparts[0] * 60 * 60 + - durparts[1] * 60 + - durparts[2] + - float(durparts[3]) / 10 - ) - self.duration = duration - else: - # No duration found. - self.duration = 0 - - def close(self): - """Close the ffmpeg process used to perform the decoding.""" - if hasattr(self, 'proc'): - # First check the process's execution status before attempting to - # kill it. This fixes an issue on Windows Subsystem for Linux where - # ffmpeg closes normally on its own, but never updates - # `returncode`. - self.proc.poll() - - # Kill the process if it is still running. - if self.proc.returncode is None: - self.proc.kill() - self.proc.wait() - - # Wait for the stream-reading threads to exit. (They need to - # stop reading before we can close the streams.) - if hasattr(self, 'stderr_reader'): - self.stderr_reader.join() - if hasattr(self, 'stdout_reader'): - self.stdout_reader.join() - - # Close the stdout and stderr streams that were opened by Popen, - # which should occur regardless of if the process terminated - # cleanly. - self.proc.stdout.close() - self.proc.stderr.close() - - # Close the handle to os.devnull, which is opened regardless of if - # a subprocess is successfully created. - self.devnull.close() - - def __del__(self): - self.close() - - # Iteration. - def __iter__(self): - return self.read_data() - - # Context manager. - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - return False diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/dataclass/configs.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/dataclass/configs.py deleted file mode 100644 index 3079101db39fb424e6bc568963055c89ab6f2f41..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/dataclass/configs.py +++ /dev/null @@ -1,1124 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
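-# -# This module defines the dataclass-based configuration groups (CommonConfig, DistributedTrainingConfig, DatasetConfig, OptimizationConfig, CheckpointConfig, GenerationConfig, etc.) that are composed into the top-level FairseqConfig at the end of the file.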
- -import os -import sys -from dataclasses import _MISSING_TYPE, dataclass, field -from typing import Any, List, Optional - -import torch -from omegaconf import II, MISSING - -from fairseq.dataclass.constants import ( - DATASET_IMPL_CHOICES, - DDP_BACKEND_CHOICES, - DDP_COMM_HOOK_CHOICES, - GENERATION_CONSTRAINTS_CHOICES, - GENERATION_DECODING_FORMAT_CHOICES, - LOG_FORMAT_CHOICES, - PIPELINE_CHECKPOINT_CHOICES, - PRINT_ALIGNMENT_CHOICES, - ZERO_SHARDING_CHOICES, -) - - -@dataclass -class FairseqDataclass: - """fairseq base dataclass that supported fetching attributes and metas""" - - _name: Optional[str] = None - - @staticmethod - def name(): - return None - - def _get_all_attributes(self) -> List[str]: - return [k for k in self.__dataclass_fields__.keys()] - - def _get_meta( - self, attribute_name: str, meta: str, default: Optional[Any] = None - ) -> Any: - return self.__dataclass_fields__[attribute_name].metadata.get(meta, default) - - def _get_name(self, attribute_name: str) -> str: - return self.__dataclass_fields__[attribute_name].name - - def _get_default(self, attribute_name: str) -> Any: - if hasattr(self, attribute_name): - if str(getattr(self, attribute_name)).startswith("${"): - return str(getattr(self, attribute_name)) - elif str(self.__dataclass_fields__[attribute_name].default).startswith( - "${" - ): - return str(self.__dataclass_fields__[attribute_name].default) - elif ( - getattr(self, attribute_name) - != self.__dataclass_fields__[attribute_name].default - ): - return getattr(self, attribute_name) - - f = self.__dataclass_fields__[attribute_name] - if not isinstance(f.default_factory, _MISSING_TYPE): - return f.default_factory() - return f.default - - def _get_type(self, attribute_name: str) -> Any: - return self.__dataclass_fields__[attribute_name].type - - def _get_help(self, attribute_name: str) -> Any: - return self._get_meta(attribute_name, "help") - - def _get_argparse_const(self, attribute_name: str) -> Any: - return self._get_meta(attribute_name, "argparse_const") - - def _get_argparse_alias(self, attribute_name: str) -> Any: - return self._get_meta(attribute_name, "argparse_alias") - - def _get_choices(self, attribute_name: str) -> Any: - return self._get_meta(attribute_name, "choices") - - @classmethod - def from_namespace(cls, args): - if isinstance(args, cls): - return args - else: - config = cls() - for k in config.__dataclass_fields__.keys(): - if k.startswith("_"): - # private member, skip - continue - if hasattr(args, k): - setattr(config, k, getattr(args, k)) - - return config - - -@dataclass -class CommonConfig(FairseqDataclass): - # This is the core dataclass including common parameters shared by all different jobs. Please append your params to other dataclasses if they were - # used for a particular purpose or task, such as those dedicated for `distributed training`, `optimization`, etc. - no_progress_bar: bool = field( - default=False, metadata={"help": "disable progress bar"} - ) - log_interval: int = field( - default=100, - metadata={ - "help": "log progress every N batches (when progress bar is disabled)" - }, - ) - log_format: Optional[LOG_FORMAT_CHOICES] = field( - default=None, metadata={"help": "log format to use"} - ) - log_file: Optional[str] = field( - default=None, metadata={"help": "log file to copy metrics to."} - ) - aim_repo: Optional[str] = field( - default=None, - metadata={"help": "path to Aim repository"}, - ) - aim_run_hash: Optional[str] = field( - default=None, - metadata={ - "help": "Aim run hash. 
If skipped, creates or continues run " - "based on save_dir" - }, - ) - tensorboard_logdir: Optional[str] = field( - default=None, - metadata={ - "help": "path to save logs for tensorboard, should match --logdir " - "of running tensorboard (default: no tensorboard logging)" - }, - ) - wandb_project: Optional[str] = field( - default=None, - metadata={"help": "Weights and Biases project name to use for logging"}, - ) - azureml_logging: Optional[bool] = field( - default=False, - metadata={"help": "Log scalars to AzureML context"}, - ) - seed: int = field( - default=1, metadata={"help": "pseudo random number generator seed"} - ) - cpu: bool = field(default=False, metadata={"help": "use CPU instead of CUDA"}) - tpu: bool = field(default=False, metadata={"help": "use TPU instead of CUDA"}) - bf16: bool = field(default=False, metadata={"help": "use bfloat16; implies --tpu"}) - memory_efficient_bf16: bool = field( - default=False, - metadata={ - "help": "use a memory-efficient version of BF16 training; implies --bf16" - }, - ) - fp16: bool = field(default=False, metadata={"help": "use FP16"}) - memory_efficient_fp16: bool = field( - default=False, - metadata={ - "help": "use a memory-efficient version of FP16 training; implies --fp16" - }, - ) - fp16_no_flatten_grads: bool = field( - default=False, metadata={"help": "don't flatten FP16 grads tensor"} - ) - fp16_init_scale: int = field( - default=2**7, metadata={"help": "default FP16 loss scale"} - ) - fp16_scale_window: Optional[int] = field( - default=None, - metadata={"help": "number of updates before increasing loss scale"}, - ) - fp16_scale_tolerance: float = field( - default=0.0, - metadata={ - "help": "pct of updates that can overflow before decreasing the loss scale" - }, - ) - on_cpu_convert_precision: bool = field( - default=False, - metadata={ - "help": "if set, the floating point conversion to fp16/bf16 runs on CPU. " - "This reduces bus transfer time and GPU memory usage." 
- }, - ) - min_loss_scale: float = field( - default=1e-4, - metadata={ - "help": "minimum FP16/AMP loss scale, after which training is stopped" - }, - ) - threshold_loss_scale: Optional[float] = field( - default=None, metadata={"help": "threshold FP16 loss scale from below"} - ) - amp: bool = field(default=False, metadata={"help": "use automatic mixed precision"}) - amp_batch_retries: int = field( - default=2, - metadata={ - "help": "number of retries of same batch after reducing loss scale with AMP" - }, - ) - amp_init_scale: int = field( - default=2**7, metadata={"help": "default AMP loss scale"} - ) - amp_scale_window: Optional[int] = field( - default=None, - metadata={"help": "number of updates before increasing AMP loss scale"}, - ) - user_dir: Optional[str] = field( - default=None, - metadata={ - "help": "path to a python module containing custom extensions (tasks and/or architectures)" - }, - ) - empty_cache_freq: int = field( - default=0, - metadata={"help": "how often to clear the PyTorch CUDA cache (0 to disable)"}, - ) - all_gather_list_size: int = field( - default=16384, - metadata={"help": "number of bytes reserved for gathering stats from workers"}, - ) - model_parallel_size: int = field( - default=1, metadata={"help": "total number of GPUs to parallelize model over"} - ) - quantization_config_path: Optional[str] = field( - default=None, metadata={"help": "path to quantization config file"} - ) - profile: bool = field( - default=False, metadata={"help": "enable autograd profiler emit_nvtx"} - ) - reset_logging: bool = field( - default=False, - metadata={ - "help": "when using Hydra, reset the logging at the beginning of training" - }, - ) - suppress_crashes: bool = field( - default=False, - metadata={ - "help": "suppress crashes when training with the hydra_train entry point so that the " - "main method can return a value (useful for sweeps)" - }, - ) - use_plasma_view: bool = field( - default=False, metadata={"help": "Store indices and sizes in shared memory"} - ) - plasma_path: Optional[str] = field( - default="/tmp/plasma", - metadata={ - "help": "path to run plasma_store, defaults to /tmp/plasma. Paths outside /tmp tend to fail." 
- }, - ) - - -@dataclass -class DistributedTrainingConfig(FairseqDataclass): - distributed_world_size: int = field( - default=max(1, torch.cuda.device_count()), - metadata={ - "help": "total number of GPUs across all nodes (default: all visible GPUs)" - }, - ) - distributed_num_procs: Optional[int] = field( - default=max(1, torch.cuda.device_count()), - metadata={ - "help": "total number of processes to fork (default: all visible GPUs)" - }, - ) - distributed_rank: Optional[int] = field( - default=0, metadata={"help": "rank of the current worker"} - ) - distributed_backend: str = field( - default="nccl", metadata={"help": "distributed backend"} - ) - distributed_init_method: Optional[str] = field( - default=None, - metadata={ - "help": "typically tcp://hostname:port that will be used to " - "establish initial connection" - }, - ) - distributed_port: int = field( - default=-1, - metadata={ - "help": "port number (not required if using --distributed-init-method)" - }, - ) - device_id: int = field( - default=os.getenv("LOCAL_RANK", 0), - metadata={ - "help": "which GPU to use (by default looks for $LOCAL_RANK, usually configured automatically)", - "argparse_alias": "--local_rank", - }, - ) - distributed_no_spawn: bool = field( - default=False, - metadata={ - "help": "do not spawn multiple processes even if multiple GPUs are visible" - }, - ) - ddp_backend: DDP_BACKEND_CHOICES = field( - default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"} - ) - ddp_comm_hook: DDP_COMM_HOOK_CHOICES = field( - default="none", metadata={"help": "communication hook"} - ) - bucket_cap_mb: int = field( - default=25, metadata={"help": "bucket size for reduction"} - ) - fix_batches_to_gpus: bool = field( - default=False, - metadata={ - "help": "don't shuffle batches between GPUs; this reduces overall " - "randomness and may affect precision but avoids the cost of re-reading the data" - }, - ) - find_unused_parameters: bool = field( - default=False, - metadata={ - "help": "disable unused parameter detection (not applicable to " - "--ddp-backend=legacy_ddp)" - }, - ) - gradient_as_bucket_view: bool = field( - default=False, - metadata={ - "help": "when set to True, gradients will be views pointing to different offsets of allreduce communication buckets. This can reduce peak memory usage, where the saved memory size will be equal to the total gradients size." - }, - ) - fast_stat_sync: bool = field( - default=False, - metadata={"help": "[deprecated] this is now defined per Criterion"}, - ) - heartbeat_timeout: int = field( - default=-1, - metadata={ - "help": "kill the job if no progress is made in N seconds; " - "set to -1 to disable" - }, - ) - broadcast_buffers: bool = field( - default=False, - metadata={ - "help": "Copy non-trainable parameters between GPUs, such as " - "batchnorm population statistics" - }, - ) - slowmo_momentum: Optional[float] = field( - default=None, - metadata={ - "help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, " - "0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs" - }, - ) - slowmo_base_algorithm: str = field( - default="localsgd", - metadata={ - "help": "Base algorithm. Either 'localsgd' or 'sgp'.
Please refer " - "to the documentation of 'slowmo_base_algorithm' parameter in " - "https://fairscale.readthedocs.io/en/latest/api/experimental/nn/slowmo_ddp.html " - "for more details" - }, - ) - localsgd_frequency: int = field( - default=3, metadata={"help": "Local SGD allreduce frequency"} - ) - nprocs_per_node: int = field( - default=max(1, torch.cuda.device_count()), - metadata={ - "help": "number of GPUs in each node. An allreduce operation across GPUs in " - "a node is very fast. Hence, we do allreduce across GPUs in a node, " - "and gossip across different nodes" - }, - ) - pipeline_model_parallel: bool = field( - default=False, - metadata={"help": "if set, use pipeline model parallelism across GPUs"}, - ) - pipeline_balance: Optional[str] = field( - default=None, - metadata={ - "help": "partition the model into N_K pieces, where each piece " - "contains N_i layers. The sum(args.pipeline_balance) " - "should equal the total number of layers in the model" - }, - ) - pipeline_devices: Optional[str] = field( - default=None, - metadata={ - "help": "a list of device indices indicating which device to place " - "each of the N_K partitions. The length of this list should " - "equal the length of the --pipeline-balance argument" - }, - ) - pipeline_chunks: Optional[int] = field( - default=0, metadata={"help": "microbatch count for pipeline model parallelism"} - ) - pipeline_encoder_balance: Optional[str] = field( - default=None, - metadata={ - "help": "partition the pipeline parallel encoder into N_K pieces, where each piece " - "contains N_i layers. The sum(args.pipeline_encoder_balance) " - "should equal the total number of encoder layers in the model" - }, - ) - pipeline_encoder_devices: Optional[str] = field( - default=None, - metadata={ - "help": "a list of device indices indicating which device to place " - "each of the N_K partitions. The length of this list should " - "equal the length of the --pipeline-encoder-balance argument" - }, - ) - pipeline_decoder_balance: Optional[str] = field( - default=None, - metadata={ - "help": "partition the pipeline parallel decoder into N_K pieces, where each piece " - "contains N_i layers. The sum(args.pipeline_decoder_balance) " - "should equal the total number of decoder layers in the model" - }, - ) - pipeline_decoder_devices: Optional[str] = field( - default=None, - metadata={ - "help": "a list of device indices indicating which device to place " - "each of the N_K partitions. 
The length of this list should " - "equal the length of the --pipeline-decoder-balance argument" - }, - ) - pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field( - default="never", - metadata={"help": "checkpointing mode for pipeline model parallelism"}, - ) - zero_sharding: ZERO_SHARDING_CHOICES = field( - default="none", metadata={"help": "ZeRO sharding"} - ) - fp16: bool = II("common.fp16") - memory_efficient_fp16: bool = II("common.memory_efficient_fp16") - tpu: bool = II("common.tpu") - # configuration for --ddp-backend=fully_sharded - no_reshard_after_forward: bool = field( - default=False, - metadata={"help": "don't reshard parameters after forward pass"}, - ) - fp32_reduce_scatter: bool = field( - default=False, - metadata={"help": "reduce-scatter grads in FP32"}, - ) - cpu_offload: bool = field( - default=False, metadata={"help": "offload FP32 params to CPU"} - ) - use_sharded_state: bool = field( - default=False, - metadata={"help": "use sharded checkpoint files"}, - ) - not_fsdp_flatten_parameters: bool = field( - default=False, - metadata={"help": "not flatten parameter param for fsdp"}, - ) - - -@dataclass -class DatasetConfig(FairseqDataclass): - num_workers: int = field( - default=1, metadata={"help": "how many subprocesses to use for data loading"} - ) - skip_invalid_size_inputs_valid_test: bool = field( - default=False, - metadata={"help": "ignore too long or too short lines in valid and test set"}, - ) - max_tokens: Optional[int] = field( - default=None, metadata={"help": "maximum number of tokens in a batch"} - ) - batch_size: Optional[int] = field( - default=None, - metadata={ - "help": "number of examples in a batch", - "argparse_alias": "--max-sentences", - }, - ) - required_batch_size_multiple: int = field( - default=8, metadata={"help": "batch size will be a multiplier of this value"} - ) - required_seq_len_multiple: int = field( - default=1, - metadata={ - "help": "maximum sequence length in batch will be a multiplier of this value" - }, - ) - dataset_impl: Optional[DATASET_IMPL_CHOICES] = field( - default=None, metadata={"help": "output dataset implementation"} - ) - data_buffer_size: int = field( - default=10, metadata={"help": "Number of batches to preload"} - ) - train_subset: str = field( - default="train", - metadata={"help": "data subset to use for training (e.g. train, valid, test)"}, - ) - valid_subset: str = field( - default="valid", - metadata={ - "help": "comma separated list of data subsets to use for validation" - " (e.g. train, valid, test)" - }, - ) - combine_valid_subsets: Optional[bool] = field( - default=None, - metadata={ - "help": "comma separated list of data subsets to use for validation" - " (e.g. 
train, valid, test)", - "argparse_alias": "--combine-val", - }, - ) - ignore_unused_valid_subsets: Optional[bool] = field( - default=False, - metadata={"help": "do not raise error if valid subsets are ignored"}, - ) - - validate_interval: int = field( - default=1, metadata={"help": "validate every N epochs"} - ) - validate_interval_updates: int = field( - default=0, metadata={"help": "validate every N updates"} - ) - validate_after_updates: int = field( - default=0, metadata={"help": "dont validate until reaching this many updates"} - ) - fixed_validation_seed: Optional[int] = field( - default=None, metadata={"help": "specified random seed for validation"} - ) - disable_validation: bool = field( - default=False, metadata={"help": "disable validation"} - ) - max_tokens_valid: Optional[int] = field( - default=II("dataset.max_tokens"), - metadata={ - "help": "maximum number of tokens in a validation batch" - " (defaults to --max-tokens)" - }, - ) - batch_size_valid: Optional[int] = field( - default=II("dataset.batch_size"), - metadata={ - "help": "batch size of the validation batch (defaults to --batch-size)", - "argparse_alias": "--max-sentences-valid", - }, - ) - max_valid_steps: Optional[int] = field( - default=None, - metadata={"help": "How many batches to evaluate", "argparse_alias": "--nval"}, - ) - curriculum: int = field( - default=0, metadata={"help": "don't shuffle batches for first N epochs"} - ) - gen_subset: str = field( - default="test", - metadata={"help": "data subset to generate (train, valid, test)"}, - ) - num_shards: int = field( - default=1, metadata={"help": "shard generation over N shards"} - ) - shard_id: int = field( - default=0, metadata={"help": "id of the shard to generate (id < num_shards)"} - ) - grouped_shuffling: bool = field( - default=False, - metadata={ - "help": "shuffle batches in groups of num_shards to enable similar sequence lengths on each GPU worker when batches are sorted by length", - }, - ) - update_epoch_batch_itr: bool = field( - default=II("dataset.grouped_shuffling"), - metadata={ - "help": "if true then prevents the reuse the epoch batch iterator by setting can_reuse_epoch_itr to false, defaults to --grouped-shuffling )", - }, - ) - update_ordered_indices_seed: bool = field( - default=False, - metadata={ - "help": "if true then increment seed with epoch for getting batch iterators, defautls to False.", - }, - ) - - -@dataclass -class OptimizationConfig(FairseqDataclass): - max_epoch: int = field( - default=0, metadata={"help": "force stop training at specified epoch"} - ) - max_update: int = field( - default=0, metadata={"help": "force stop training at specified update"} - ) - stop_time_hours: float = field( - default=0, - metadata={ - "help": "force stop training after specified cumulative time (if >0)" - }, - ) - clip_norm: float = field( - default=0.0, metadata={"help": "clip threshold of gradients"} - ) - sentence_avg: bool = field( - default=False, - metadata={ - "help": "normalize gradients by the number of sentences in a batch" - " (default is to normalize by number of tokens)" - }, - ) - update_freq: List[int] = field( - default_factory=lambda: [1], - metadata={"help": "update parameters every N_i batches, when in epoch i"}, - ) - lr: List[float] = field( - default_factory=lambda: [0.25], - metadata={ - "help": "learning rate for the first N epochs; all epochs >N using LR_N" - " (note: this may be interpreted differently depending on --lr-scheduler)" - }, - ) - stop_min_lr: float = field( - default=-1.0, - metadata={"help": "stop 
training when the learning rate reaches this minimum"}, - ) - use_bmuf: bool = field( - default=False, - metadata={ - "help": "specify global optimizer for syncing models on different GPUs/shards" - }, - ) - skip_remainder_batch: Optional[bool] = field( - default=False, - metadata={ - "help": "if set, include the last (partial) batch of each epoch in training" - " (default is to skip it)." - }, - ) - - -@dataclass -class CheckpointConfig(FairseqDataclass): - save_dir: str = field( - default="checkpoints", metadata={"help": "path to save checkpoints"} - ) - restore_file: str = field( - default="checkpoint_last.pt", - metadata={ - "help": "filename from which to load checkpoint " - "(default: <save_dir>/checkpoint_last.pt)" - }, - ) - continue_once: Optional[str] = field( - default=None, - metadata={ - "help": "continues from this checkpoint, unless a checkpoint indicated in 'restore_file' option is present" - }, - ) - finetune_from_model: Optional[str] = field( - default=None, - metadata={ - "help": "finetune from a pretrained model; note that meters and lr scheduler will be reset" - }, - ) - reset_dataloader: bool = field( - default=False, - metadata={ - "help": "if set, does not reload dataloader state from the checkpoint" - }, - ) - reset_lr_scheduler: bool = field( - default=False, - metadata={ - "help": "if set, does not load lr scheduler state from the checkpoint" - }, - ) - reset_meters: bool = field( - default=False, - metadata={"help": "if set, does not load meters from the checkpoint"}, - ) - reset_optimizer: bool = field( - default=False, - metadata={"help": "if set, does not load optimizer state from the checkpoint"}, - ) - optimizer_overrides: str = field( - default="{}", - metadata={ - "help": "a dictionary used to override optimizer args when loading a checkpoint" - }, - ) - save_interval: int = field( - default=1, metadata={"help": "save a checkpoint every N epochs"} - ) - save_interval_updates: int = field( - default=0, metadata={"help": "save a checkpoint (and validate) every N updates"} - ) - keep_interval_updates: int = field( - default=-1, - metadata={ - "help": "keep the last N checkpoints saved with --save-interval-updates" - }, - ) - keep_interval_updates_pattern: int = field( - default=-1, - metadata={ - "help": "when used with --keep-interval-updates, skips deleting " - "any checkpoints with update X where " - "X %% keep_interval_updates_pattern == 0" - }, - ) - keep_last_epochs: int = field( - default=-1, metadata={"help": "keep last N epoch checkpoints"} - ) - keep_best_checkpoints: int = field( - default=-1, metadata={"help": "keep best N checkpoints based on scores"} - ) - no_save: bool = field( - default=False, metadata={"help": "don't save models or checkpoints"} - ) - no_epoch_checkpoints: bool = field( - default=False, metadata={"help": "only store last and best checkpoints"} - ) - no_last_checkpoints: bool = field( - default=False, metadata={"help": "don't store last checkpoints"} - ) - no_save_optimizer_state: bool = field( - default=False, - metadata={"help": "don't save optimizer-state as part of checkpoint"}, - ) - best_checkpoint_metric: str = field( - default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'} - ) - maximize_best_checkpoint_metric: bool = field( - default=False, - metadata={ - "help": 'select the largest metric value for saving "best" checkpoints' - }, - ) - patience: int = field( - default=-1, - metadata={ - "help": ( - "early stop training if valid performance doesn't " - "improve for N consecutive validation runs; note "
- "that this is influenced by --validate-interval" - ) - }, - ) - checkpoint_suffix: str = field( - default="", metadata={"help": "suffix to add to the checkpoint file name"} - ) - checkpoint_shard_count: int = field( - default=1, - metadata={ - "help": "Number of shards containing the checkpoint - " - "if the checkpoint is over 300GB, it is preferable " - "to split it into shards to prevent OOM on CPU while loading " - "the checkpoint" - }, - ) - load_checkpoint_on_all_dp_ranks: bool = field( - default=False, - metadata={ - "help": "load checkpoints on all data parallel devices " - "(default: only load on rank 0 and broadcast to other devices)" - }, - ) - write_checkpoints_asynchronously: bool = field( - default=False, - metadata={ - "help": ( - "Write checkpoints asynchronously in a separate " - "thread. NOTE: This feature is currently being tested." - ), - "argparse_alias": "--save-async", - }, - ) - model_parallel_size: int = II("common.model_parallel_size") - - -@dataclass -class FairseqBMUFConfig(FairseqDataclass): - block_lr: float = field( - default=1, metadata={"help": "block learning rate for bmuf"} - ) - block_momentum: float = field( - default=0.875, metadata={"help": "block momentum for bmuf"} - ) - global_sync_iter: int = field( - default=50, metadata={"help": "Iteration for syncing global model"} - ) - warmup_iterations: int = field( - default=500, metadata={"help": "warmup iterations for model to broadcast"} - ) - use_nbm: bool = field( - default=False, - metadata={"help": "Specify whether you want to use classical BM / Nesterov BM"}, - ) - average_sync: bool = field( - default=False, - metadata={ - "help": "Specify whether you want to average the local momentum after each sync" - }, - ) - distributed_world_size: int = II("distributed_training.distributed_world_size") - - -@dataclass -class GenerationConfig(FairseqDataclass): - beam: int = field( - default=5, - metadata={"help": "beam size"}, - ) - nbest: int = field( - default=1, - metadata={"help": "number of hypotheses to output"}, - ) - max_len_a: float = field( - default=0, - metadata={ - "help": "generate sequences of maximum length ax + b, where x is the source length" - }, - ) - max_len_b: int = field( - default=200, - metadata={ - "help": "generate sequences of maximum length ax + b, where x is the source length" - }, - ) - min_len: int = field( - default=1, - metadata={"help": "minimum generation length"}, - ) - match_source_len: bool = field( - default=False, - metadata={"help": "generations should match the source length"}, - ) - unnormalized: bool = field( - default=False, - metadata={"help": "compare unnormalized hypothesis scores"}, - ) - no_early_stop: bool = field( - default=False, - metadata={"help": "deprecated"}, - ) - no_beamable_mm: bool = field( - default=False, - metadata={"help": "don't use BeamableMM in attention layers"}, - ) - lenpen: float = field( - default=1, - metadata={ - "help": "length penalty: <1.0 favors shorter, >1.0 favors longer sentences" - }, - ) - unkpen: float = field( - default=0, - metadata={ - "help": "unknown word penalty: <0 produces more unks, >0 produces fewer" - }, - ) - replace_unk: Optional[str] = field( - default=None, - metadata={ - "help": "perform unknown replacement (optionally with alignment dictionary)", - "argparse_const": "@@ ", - }, - ) - sacrebleu: bool = field( - default=False, - metadata={"help": "score with sacrebleu"}, - ) - score_reference: bool = field( - default=False, - metadata={"help": "just score the reference translation"}, - ) - prefix_size: int = 
field( - default=0, - metadata={"help": "initialize generation by target prefix of given length"}, - ) - no_repeat_ngram_size: int = field( - default=0, - metadata={ - "help": "ngram blocking such that this size ngram cannot be repeated in the generation" - }, - ) - sampling: bool = field( - default=False, - metadata={"help": "sample hypotheses instead of using beam search"}, - ) - sampling_topk: int = field( - default=-1, - metadata={"help": "sample from top K likely next words instead of all words"}, - ) - sampling_topp: float = field( - default=-1.0, - metadata={ - "help": "sample from the smallest set whose cumulative probability mass exceeds p for next words" - }, - ) - constraints: Optional[GENERATION_CONSTRAINTS_CHOICES] = field( - default=None, - metadata={ - "help": "enables lexically constrained decoding", - "argparse_const": "ordered", - }, - ) - temperature: float = field( - default=1.0, - metadata={"help": "temperature for generation"}, - ) - diverse_beam_groups: int = field( - default=-1, - metadata={"help": "number of groups for Diverse Beam Search"}, - ) - diverse_beam_strength: float = field( - default=0.5, - metadata={"help": "strength of diversity penalty for Diverse Beam Search"}, - ) - diversity_rate: float = field( - default=-1.0, - metadata={"help": "strength of diversity penalty for Diverse Siblings Search"}, - ) - print_alignment: Optional[PRINT_ALIGNMENT_CHOICES] = field( - default=None, - metadata={ - "help": "if set, uses attention feedback to compute and print alignment to source tokens " - "(valid options are: hard, soft, otherwise treated as hard alignment)", - "argparse_const": "hard", - }, - ) - print_step: bool = field( - default=False, - metadata={"help": "print steps"}, - ) - lm_path: Optional[str] = field( - default=None, - metadata={"help": "path to lm checkpoint for lm fusion"}, - ) - lm_weight: float = field( - default=0.0, - metadata={"help": "weight for lm probs for lm fusion"}, - ) - - # arguments for iterative refinement generator - iter_decode_eos_penalty: float = field( - default=0.0, - metadata={"help": "if > 0.0, it penalizes early stopping in decoding."}, - ) - iter_decode_max_iter: int = field( - default=10, - metadata={"help": "maximum iterations for iterative refinement."}, - ) - iter_decode_force_max_iter: bool = field( - default=False, - metadata={ - "help": "if set, run exactly the maximum number of iterations without early stopping" - }, - ) - iter_decode_with_beam: int = field( - default=1, - metadata={ - "help": "if > 1, the model will generate translations of varying lengths." - }, - ) - iter_decode_with_external_reranker: bool = field( - default=False, - metadata={ - "help": "if set, the last checkpoint is assumed to be a reranker used to rescore the translations" - }, - ) - retain_iter_history: bool = field( - default=False, - metadata={ - "help": "if set, decoding returns the whole history of iterative refinement" - }, - ) - retain_dropout: bool = field( - default=False, - metadata={"help": "Use dropout at inference time"}, - ) - # temporarily set to Any until https://github.com/facebookresearch/hydra/issues/1117 is fixed - # retain_dropout_modules: Optional[List[str]] = field( - retain_dropout_modules: Any = field( - default=None, - metadata={ - "help": "if set, only retain dropout for the specified modules; " - "if not set, then dropout will be retained for all modules" - }, - ) - # special decoding format for advanced decoding.
- decoding_format: Optional[GENERATION_DECODING_FORMAT_CHOICES] = field( - default=None, - metadata={"help": "special decoding format for advanced decoding."}, - ) - no_seed_provided: bool = field( - default=False, - metadata={"help": "if set, don't use a seed for initializing random generators"}, - ) - eos_token: Optional[str] = field( - default=None, - metadata={"help": "EOS token"}, - ) - - -@dataclass -class CommonEvalConfig(FairseqDataclass): - path: Optional[str] = field( - default=None, - metadata={"help": "path(s) to model file(s), colon separated"}, - ) - post_process: Optional[str] = field( - default=None, - metadata={ - "help": ( - "post-process text by removing BPE, letter segmentation, etc. " - "Valid options can be found in fairseq.data.utils.post_process." - ), - "argparse_const": "subword_nmt", - "argparse_alias": "--remove-bpe", - }, - ) - quiet: bool = field(default=False, metadata={"help": "only print final scores"}) - model_overrides: str = field( - default="{}", - metadata={ - "help": "a dictionary used to override model args at generation that were used during model training" - }, - ) - results_path: Optional[str] = field( - default=None, metadata={"help": "path to save eval results (optional)"} - ) - - -@dataclass -class EvalLMConfig(FairseqDataclass): - output_word_probs: bool = field( - default=False, - metadata={ - "help": "if set, outputs words and their predicted log probabilities to standard output" - }, - ) - output_word_stats: bool = field( - default=False, - metadata={ - "help": "if set, outputs word statistics such as word count, average probability, etc" - }, - ) - context_window: int = field( - default=0, - metadata={ - "help": "ensures that every evaluated token has access to a context of at least this size, if possible" - }, - ) - softmax_batch: int = field( - default=sys.maxsize, - metadata={ - "help": "if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory" - }, - ) - - -@dataclass -class InteractiveConfig(FairseqDataclass): - buffer_size: int = field( - default=0, - metadata={ - "help": "read this many sentences into a buffer before processing them" - }, - ) - input: str = field( - default="-", - metadata={"help": "file to read from; use - for stdin"}, - ) - - -@dataclass -class EMAConfig(FairseqDataclass): - store_ema: bool = field( - default=False, metadata={"help": "store exponential moving average shadow model"} - ) - ema_decay: float = field( - default=0.9999, metadata={"help": "decay for exponential moving average model"} - ) - ema_start_update: int = field( - default=0, metadata={"help": "start EMA update after this many model updates"} - ) - ema_seed_model: Optional[str] = field( - default=None, - metadata={ - "help": "Seed to load EMA model from. " - "Used to load EMA model separately from the actual model."
- }, - ) - ema_update_freq: int = field( - default=1, metadata={"help": "Do EMA update every this many model updates"} - ) - ema_fp32: bool = field( - default=False, - metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"}, - ) - - -@dataclass -class FairseqConfig(FairseqDataclass): - common: CommonConfig = CommonConfig() - common_eval: CommonEvalConfig = CommonEvalConfig() - distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() - dataset: DatasetConfig = DatasetConfig() - optimization: OptimizationConfig = OptimizationConfig() - checkpoint: CheckpointConfig = CheckpointConfig() - bmuf: FairseqBMUFConfig = FairseqBMUFConfig() - generation: GenerationConfig = GenerationConfig() - eval_lm: EvalLMConfig = EvalLMConfig() - interactive: InteractiveConfig = InteractiveConfig() - model: Any = MISSING - task: Any = None - criterion: Any = None - optimizer: Any = None - lr_scheduler: Any = None - scoring: Any = None - bpe: Any = None - tokenizer: Any = None - ema: EMAConfig = EMAConfig() diff --git a/spaces/aseuteurideu/audio_deepfake_detector/data/__init__.py b/spaces/aseuteurideu/audio_deepfake_detector/data/__init__.py deleted file mode 100644 index a02e758d6ac34ba5d1a5dec73626569661aa8756..0000000000000000000000000000000000000000 --- a/spaces/aseuteurideu/audio_deepfake_detector/data/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch.utils.data - -class DataProvider(): - - def __init__(self, cfg, dataset, batch_size=None, shuffle=True): - super().__init__() - self.dataset = dataset - if batch_size is None: - batch_size = cfg.BATCH_SIZE - self.dataloader = torch.utils.data.DataLoader( - self.dataset, - batch_size=batch_size, - shuffle=shuffle, - num_workers=int(cfg.WORKERS), - drop_last=False) - - def __len__(self): - return len(self.dataset) - - def __iter__(self): - for i, data in enumerate(self.dataloader): - yield data \ No newline at end of file diff --git a/spaces/ashercn97/AsherTesting/extensions/multimodal/abstract_pipeline.py b/spaces/ashercn97/AsherTesting/extensions/multimodal/abstract_pipeline.py deleted file mode 100644 index 584219419d256e7743fd4d5120c56bcfa8f2a9f9..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/extensions/multimodal/abstract_pipeline.py +++ /dev/null @@ -1,62 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -import torch -from PIL import Image - - -class AbstractMultimodalPipeline(ABC): - @staticmethod - @abstractmethod - def name() -> str: - 'name of the pipeline, should be same as in --multimodal-pipeline' - pass - - @staticmethod - @abstractmethod - def image_start() -> Optional[str]: - 'return image start string, string representation of image start token, or None if not applicable' - pass - - @staticmethod - @abstractmethod - def image_end() -> Optional[str]: - 'return image end string, string representation of image end token, or None if not applicable' - pass - - @staticmethod - @abstractmethod - def placeholder_token_id() -> int: - 'return placeholder token id' - pass - - @staticmethod - @abstractmethod - def num_image_embeds() -> int: - 'return the number of embeds used by a single image (for example: 256 for LLaVA)' - pass - - @abstractmethod - def embed_images(self, images: List[Image.Image]) -> torch.Tensor: - 'forward the images through vision pipeline, and return their embeddings' - pass - - @staticmethod - @abstractmethod - def embed_tokens(input_ids: torch.Tensor) -> torch.Tensor: - 'embed tokens, the exact function varies by LLM, 
for LLaMA it is `shared.model.model.embed_tokens`' - pass - - @staticmethod - @abstractmethod - def placeholder_embeddings() -> torch.Tensor: - 'get placeholder embeddings if there are multiple images, and `add_all_images_to_prompt` is False' - pass - - def _get_device(self, setting_name: str, params: dict): - if params[setting_name] is None: - return torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - return torch.device(params[setting_name]) - - def _get_dtype(self, setting_name: str, params: dict): - return torch.float32 if int(params[setting_name]) == 32 else torch.float16 diff --git a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/sem_grounding_net.py b/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/sem_grounding_net.py deleted file mode 100644 index 80ef6dd49dc5bcc58205913276c39982b07320b9..0000000000000000000000000000000000000000 --- a/spaces/attention-refocusing/Attention-refocusing/gligen/ldm/modules/diffusionmodules/sem_grounding_net.py +++ /dev/null @@ -1,68 +0,0 @@ -import torch -import torch.nn as nn -from ldm.modules.attention import BasicTransformerBlock -from ldm.modules.diffusionmodules.util import checkpoint, FourierEmbedder -import torch.nn.functional as F -from ..attention import SelfAttention, FeedForward -from .convnext import convnext_tiny - - - - -class PositionNet(nn.Module): - def __init__(self, resize_input=448, in_dim=152, out_dim=768): - super().__init__() - - self.resize_input = resize_input - self.down_factor = 32 # determined by the convnext backbone - self.out_dim = out_dim - assert self.resize_input % self.down_factor == 0 - - self.in_conv = nn.Conv2d(in_dim,3,3,1,1) # from num_sem to 3 channels - self.convnext_tiny_backbone = convnext_tiny(pretrained=True) - - self.num_tokens = (self.resize_input // self.down_factor) ** 2 - - convnext_feature_dim = 768 - self.pos_embedding = nn.Parameter(torch.empty(1, self.num_tokens, convnext_feature_dim).normal_(std=0.02)) # from BERT - - self.linears = nn.Sequential( - nn.Linear( convnext_feature_dim, 512), - nn.SiLU(), - nn.Linear( 512, 512), - nn.SiLU(), - nn.Linear(512, out_dim), - ) - - self.null_feature = torch.nn.Parameter(torch.zeros([convnext_feature_dim])) - - - def forward(self, sem, mask): - B = sem.shape[0] - - # token from edge map - sem = torch.nn.functional.interpolate(sem, self.resize_input, mode="nearest") - sem = self.in_conv(sem) - sem_feature = self.convnext_tiny_backbone(sem) - objs = sem_feature.reshape(B, -1, self.num_tokens) - objs = objs.permute(0, 2, 1) # N*Num_tokens*dim - - # expand null token - null_objs = self.null_feature.view(1,1,-1) - null_objs = null_objs.repeat(B,self.num_tokens,1) - - # mask replacing - mask = mask.view(-1,1,1) - objs = objs*mask + null_objs*(1-mask) - - # add pos - objs = objs + self.pos_embedding - - # fuse them - objs = self.linears(objs) - - assert objs.shape == torch.Size([B,self.num_tokens,self.out_dim]) - return objs - - - diff --git a/spaces/avans06/whisper-webui-translate/src/vad.py b/spaces/avans06/whisper-webui-translate/src/vad.py deleted file mode 100644 index e9fd8347557d32c074e66a96d18a1e7e218210a5..0000000000000000000000000000000000000000 --- a/spaces/avans06/whisper-webui-translate/src/vad.py +++ /dev/null @@ -1,587 +0,0 @@ -from abc import ABC, abstractmethod -from collections import Counter, deque -import os -import time - -from typing import Any, Deque, Iterator, List, Dict - -from pprint import pprint -from src.hooks.progressListener import ProgressListener 
-from src.hooks.subTaskProgressListener import SubTaskProgressListener -from src.hooks.whisperProgressHook import create_progress_listener_handle -from src.modelCache import GLOBAL_MODEL_CACHE, ModelCache - -from src.segments import merge_timestamps -from src.whisper.abstractWhisperContainer import AbstractWhisperCallback - -# Workaround for https://github.com/tensorflow/tensorflow/issues/48797 -try: - import tensorflow as tf -except ModuleNotFoundError: - # Error handling - pass - -import torch - -import ffmpeg -import numpy as np - -from src.utils import format_timestamp -from enum import Enum - -class NonSpeechStrategy(Enum): - """ - Ignore non-speech segments. - """ - SKIP = 1 - """ - Just treat non-speech segments as speech. - """ - CREATE_SEGMENT = 2 - """ - Expand speech segments into subsequent non-speech segments. - """ - EXPAND_SEGMENT = 3 - -# Defaults for Silero -SPEECH_THRESHOLD = 0.3 - -# Minimum size of segments to process -MIN_SEGMENT_DURATION = 1 - -# The maximum time for texts from old segments to be used in the next segment -MAX_PROMPT_WINDOW = 0 # seconds (0 = disabled) -PROMPT_NO_SPEECH_PROB = 0.1 # Do not pass the text from segments with a no speech probability higher than this - -VAD_MAX_PROCESSING_CHUNK = 60 * 60 # 60 minutes of audio - -class TranscriptionConfig(ABC): - def __init__(self, non_speech_strategy: NonSpeechStrategy = NonSpeechStrategy.SKIP, - segment_padding_left: float = None, segment_padding_right = None, max_silent_period: float = None, - max_merge_size: float = None, max_prompt_window: float = None, initial_segment_index = -1): - self.non_speech_strategy = non_speech_strategy - self.segment_padding_left = segment_padding_left - self.segment_padding_right = segment_padding_right - self.max_silent_period = max_silent_period - self.max_merge_size = max_merge_size - self.max_prompt_window = max_prompt_window - self.initial_segment_index = initial_segment_index - -class PeriodicTranscriptionConfig(TranscriptionConfig): - def __init__(self, periodic_duration: float, non_speech_strategy: NonSpeechStrategy = NonSpeechStrategy.SKIP, - segment_padding_left: float = None, segment_padding_right = None, max_silent_period: float = None, - max_merge_size: float = None, max_prompt_window: float = None, initial_segment_index = -1): - super().__init__(non_speech_strategy, segment_padding_left, segment_padding_right, max_silent_period, max_merge_size, max_prompt_window, initial_segment_index) - self.periodic_duration = periodic_duration - -class AbstractTranscription(ABC): - def __init__(self, sampling_rate: int = 16000): - self.sampling_rate = sampling_rate - - def get_audio_segment(self, audio: str, start_time: str = None, duration: str = None): - return load_audio(audio, self.sampling_rate, start_time, duration) - - def is_transcribe_timestamps_fast(self): - """ - Determine if get_transcribe_timestamps is fast enough to not need parallelization. - """ - return False - - @abstractmethod - def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig, start_time: float, end_time: float): - """ - Get the start and end timestamps of the sections that should be transcribed by this VAD method. - - Parameters - ---------- - audio: str - The audio file. - config: TranscriptionConfig - The transcription configuration. - start_time: float - The start of the window to process, in seconds. - end_time: float - The end of the window to process, in seconds. - - Returns - ------- - A list of start and end timestamps, in fractional seconds.
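-            Each entry is a dict with 'start' and 'end' keys giving times in seconds, as consumed by get_merged_timestamps.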
- """ - return - - def get_merged_timestamps(self, timestamps: List[Dict[str, Any]], config: TranscriptionConfig, total_duration: float): - """ - Get the start and end timestamps of the sections that should be transcribed by this VAD method, - after merging the given segments using the specified configuration. - - Parameters - ---------- - audio: str - The audio file. - config: TranscriptionConfig - The transcription configuration. - - Returns - ------- - A list of start and end timestamps, in fractional seconds. - """ - merged = merge_timestamps(timestamps, config.max_silent_period, config.max_merge_size, - config.segment_padding_left, config.segment_padding_right) - - if config.non_speech_strategy != NonSpeechStrategy.SKIP: - # Expand segments to include the gaps between them - if (config.non_speech_strategy == NonSpeechStrategy.CREATE_SEGMENT): - # When we have a prompt window, we create speech segments betwen each segment if we exceed the merge size - merged = self.fill_gaps(merged, total_duration=total_duration, max_expand_size=config.max_merge_size) - elif config.non_speech_strategy == NonSpeechStrategy.EXPAND_SEGMENT: - # With no prompt window, it is better to just expand the segments (this effectively passes the prompt to the next segment) - merged = self.expand_gaps(merged, total_duration=total_duration) - else: - raise Exception("Unknown non-speech strategy: " + str(config.non_speech_strategy)) - - print("Transcribing non-speech:") - pprint(merged) - return merged - - def transcribe(self, audio: str, whisperCallable: AbstractWhisperCallback, config: TranscriptionConfig, - progressListener: ProgressListener = None): - """ - Transcribe the given audo file. - - Parameters - ---------- - audio: str - The audio file. - whisperCallable: WhisperCallback - A callback object to call to transcribe each segment. - - Returns - ------- - A list of start and end timestamps, in fractional seconds. 
- """ - - try: - max_audio_duration = self.get_audio_duration(audio, config) - timestamp_segments = self.get_transcribe_timestamps(audio, config, 0, max_audio_duration) - - # Get speech timestamps from full audio file - merged = self.get_merged_timestamps(timestamp_segments, config, max_audio_duration) - - # A deque of transcribed segments that is passed to the next segment as a prompt - prompt_window = deque() - - print("Processing timestamps:") - pprint(merged) - - result = { - 'text': "", - 'segments': [], - 'language': "" - } - languageCounter = Counter() - detected_language = None - - segment_index = config.initial_segment_index - - # Calculate progress - progress_start_offset = merged[0]['start'] if len(merged) > 0 else 0 - progress_total_duration = sum([segment['end'] - segment['start'] for segment in merged]) - sub_task_total = 1/len(merged) - - # For each time segment, run whisper - for idx, segment in enumerate(merged): - segment_index += 1 - segment_start = segment['start'] - segment_end = segment['end'] - segment_expand_amount = segment.get('expand_amount', 0) - segment_gap = segment.get('gap', False) - - segment_duration = segment_end - segment_start - - if segment_duration < MIN_SEGMENT_DURATION: - continue - - # Audio to run on Whisper - segment_audio = self.get_audio_segment(audio, start_time = str(segment_start), duration = str(segment_duration)) - # Previous segments to use as a prompt - segment_prompt = ' '.join([segment['text'] for segment in prompt_window]) if len(prompt_window) > 0 else None - - # Detected language - detected_language = languageCounter.most_common(1)[0][0] if len(languageCounter) > 0 else None - - print("Running whisper from ", format_timestamp(segment_start), " to ", format_timestamp(segment_end), ", duration: ", - segment_duration, "expanded: ", segment_expand_amount, ", prompt: ", segment_prompt, ", detected language: ", detected_language) - - perf_start_time = time.perf_counter() - - scaled_progress_listener = SubTaskProgressListener(progressListener, - base_task_total=progressListener.sub_task_total if isinstance(progressListener, SubTaskProgressListener) else progress_total_duration, - sub_task_start=idx*(1/len(merged)), - sub_task_total=1/len(merged)) - segment_result = whisperCallable.invoke(segment_audio, segment_index, segment_prompt, detected_language, progress_listener=scaled_progress_listener) - - perf_end_time = time.perf_counter() - print("Whisper took {} seconds".format(perf_end_time - perf_start_time)) - - adjusted_segments = self.adjust_timestamp(segment_result["segments"], adjust_seconds=segment_start, max_source_time=segment_duration) - - # Propagate expand amount to the segments - if (segment_expand_amount > 0): - segment_without_expansion = segment_duration - segment_expand_amount - - for adjusted_segment in adjusted_segments: - adjusted_segment_end = adjusted_segment['end'] - - # Add expand amount if the segment got expanded - if (adjusted_segment_end > segment_without_expansion): - adjusted_segment["expand_amount"] = adjusted_segment_end - segment_without_expansion - - # Append to output - result['text'] += segment_result['text'] - result['segments'].extend(adjusted_segments) - - # Increment detected language - if not segment_gap: - languageCounter[segment_result['language']] += 1 - - # Update prompt window - self.__update_prompt_window(prompt_window, adjusted_segments, segment_end, segment_gap, config) - - if detected_language is not None: - result['language'] = detected_language - finally: - # Notify progress listener that we 
are done - if progressListener is not None: - progressListener.on_finished() - return result - - def get_audio_duration(self, audio: str, config: TranscriptionConfig): - return get_audio_duration(audio) - - def __update_prompt_window(self, prompt_window: Deque, adjusted_segments: List, segment_end: float, segment_gap: bool, config: TranscriptionConfig): - if (config.max_prompt_window is not None and config.max_prompt_window > 0): - # Add segments to the current prompt window (unless it is a speech gap) - if not segment_gap: - for segment in adjusted_segments: - if segment.get('no_speech_prob', 0) <= PROMPT_NO_SPEECH_PROB: - prompt_window.append(segment) - - while (len(prompt_window) > 0): - first_end_time = prompt_window[0].get('end', 0) - # Time expanded in the segments should be discounted from the prompt window - first_expand_time = prompt_window[0].get('expand_amount', 0) - - if (first_end_time - first_expand_time < segment_end - config.max_prompt_window): - prompt_window.popleft() - else: - break - - def include_gaps(self, segments: Iterator[dict], min_gap_length: float, total_duration: float): - result = [] - last_end_time = 0 - - for segment in segments: - segment_start = float(segment['start']) - segment_end = float(segment['end']) - - if (last_end_time != segment_start): - delta = segment_start - last_end_time - - if (min_gap_length is None or delta >= min_gap_length): - result.append( { 'start': last_end_time, 'end': segment_start, 'gap': True } ) - - last_end_time = segment_end - result.append(segment) - - # Also include total duration if specified - if (total_duration is not None and last_end_time < total_duration): - # The trailing gap runs from the last segment's end to the total duration - delta = total_duration - last_end_time - - if (min_gap_length is None or delta >= min_gap_length): - result.append( { 'start': last_end_time, 'end': total_duration, 'gap': True } ) - - return result - - # Expand the end time of each segment to the start of the next segment - def expand_gaps(self, segments: List[Dict[str, Any]], total_duration: float): - result = [] - - if len(segments) == 0: - return result - - # Add gap at the beginning if needed - if (segments[0]['start'] > 0): - result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } ) - - for i in range(len(segments) - 1): - current_segment = segments[i] - next_segment = segments[i + 1] - - delta = next_segment['start'] - current_segment['end'] - - # Expand if the gap actually exists - if (delta >= 0): - current_segment = current_segment.copy() - current_segment['expand_amount'] = delta - current_segment['end'] = next_segment['start'] - - result.append(current_segment) - - # Add last segment - last_segment = segments[-1] - result.append(last_segment) - - # Also include total duration if specified - if (total_duration is not None): - last_segment = result[-1] - - if (last_segment['end'] < total_duration): - last_segment = last_segment.copy() - last_segment['end'] = total_duration - result[-1] = last_segment - - return result - - def fill_gaps(self, segments: List[Dict[str, Any]], total_duration: float, max_expand_size: float = None): - result = [] - - if len(segments) == 0: - return result - - # Add gap at the beginning if needed - if (segments[0]['start'] > 0): - result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } ) - - for i in range(len(segments) - 1): - expanded = False - current_segment = segments[i] - next_segment = segments[i + 1] - - delta = next_segment['start'] - current_segment['end'] - - if (max_expand_size is not None and delta <= max_expand_size): - # Just expand
the current segment - current_segment = current_segment.copy() - current_segment['expand_amount'] = delta - current_segment['end'] = next_segment['start'] - expanded = True - - result.append(current_segment) - - # Add a gap to the next segment if needed - if (delta >= 0 and not expanded): - result.append({ 'start': current_segment['end'], 'end': next_segment['start'], 'gap': True } ) - - # Add last segment - last_segment = segments[-1] - result.append(last_segment) - - # Also include total duration if specified - if (total_duration is not None): - last_segment = result[-1] - - delta = total_duration - last_segment['end'] - - if (delta > 0): - if (max_expand_size is not None and delta <= max_expand_size): - # Expand the last segment - last_segment = last_segment.copy() - last_segment['expand_amount'] = delta - last_segment['end'] = total_duration - result[-1] = last_segment - else: - result.append({ 'start': last_segment['end'], 'end': total_duration, 'gap': True } ) - - return result - - def adjust_timestamp(self, segments: Iterator[dict], adjust_seconds: float, max_source_time: float = None): - result = [] - - for segment in segments: - segment_start = float(segment['start']) - segment_end = float(segment['end']) - - # Filter segments? - if (max_source_time is not None): - if (segment_start > max_source_time): - continue - segment_end = min(max_source_time, segment_end) - - new_segment = segment.copy() - - # Add to start and end - new_segment['start'] = segment_start + adjust_seconds - new_segment['end'] = segment_end + adjust_seconds - - # Handle words - if ('words' in new_segment): - for word in new_segment['words']: - # Adjust start and end - word['start'] = word['start'] + adjust_seconds - word['end'] = word['end'] + adjust_seconds - - result.append(new_segment) - return result - - def multiply_timestamps(self, timestamps: List[Dict[str, Any]], factor: float): - result = [] - - for entry in timestamps: - start = entry['start'] - end = entry['end'] - - result.append({ - 'start': start * factor, - 'end': end * factor - }) - return result - - -class VadSileroTranscription(AbstractTranscription): - def __init__(self, sampling_rate: int = 16000, cache: ModelCache = None): - super().__init__(sampling_rate=sampling_rate) - self.model = None - self.cache = cache - self._initialize_model() - - def _initialize_model(self): - if (self.cache is not None): - model_key = "VadSileroTranscription" - self.model, self.get_speech_timestamps = self.cache.get(model_key, self._create_model) - print("Loaded Silero model from cache.") - else: - self.model, self.get_speech_timestamps = self._create_model() - print("Created Silero model") - - def _create_model(self): - repo_owner = "snakers4" - repo_name = "silero-vad" - ref = "master" - - try: - model, utils = torch.hub.load(repo_or_dir=f'{repo_owner}/{repo_name}', model='silero_vad') - except Exception as e: - hub_dir = torch.hub.get_dir() - owner_name_branch = '_'.join([repo_owner, repo_name, ref]) - repo_dir = os.path.join(hub_dir, owner_name_branch) - if os.path.exists(repo_dir): - print(f"vad.py: torch.hub.load({repo_owner}/{repo_name}) Exception: {str(e)}, Using cache found in {repo_dir}\n") - model, utils = torch.hub.load(repo_or_dir=repo_dir, model='silero_vad', source="local") - else: - raise - - - # Silero does not benefit from multi-threading - torch.set_num_threads(1) # JIT - (get_speech_timestamps, _, _, _, _) = utils - - return model, get_speech_timestamps - - def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig,
-class VadSileroTranscription(AbstractTranscription):
-    def __init__(self, sampling_rate: int = 16000, cache: ModelCache = None):
-        super().__init__(sampling_rate=sampling_rate)
-        self.model = None
-        self.cache = cache
-        self._initialize_model()
-
-    def _initialize_model(self):
-        if (self.cache is not None):
-            model_key = "VadSileroTranscription"
-            self.model, self.get_speech_timestamps = self.cache.get(model_key, self._create_model)
-            print("Loaded Silero model from cache.")
-        else:
-            self.model, self.get_speech_timestamps = self._create_model()
-            print("Created Silero model")
-
-    def _create_model(self):
-        repo_owner = "snakers4"
-        repo_name = "silero-vad"
-        ref = "master"
-
-        try:
-            model, utils = torch.hub.load(repo_or_dir=f'{repo_owner}/{repo_name}', model='silero_vad')
-        except Exception as e:
-            hub_dir = torch.hub.get_dir()
-            owner_name_branch = '_'.join([repo_owner, repo_name, ref])
-            repo_dir = os.path.join(hub_dir, owner_name_branch)
-            if os.path.exists(repo_dir):
-                print(f"vad.py: torch.hub.load({repo_owner}/{repo_name}) Exception: {str(e)}, Using cache found in {repo_dir}\n")
-                model, utils = torch.hub.load(repo_or_dir=repo_dir, model='silero_vad', source="local")
-            else:
-                raise
-
-        # Silero does not benefit from multi-threading
-        torch.set_num_threads(1)  # JIT
-        (get_speech_timestamps, _, _, _, _) = utils
-
-        return model, get_speech_timestamps
-
-    def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig, start_time: float, end_time: float):
-        result = []
-
-        print("Getting timestamps from audio file: {}, start: {}, end: {}".format(audio, start_time, end_time))
-        perf_start_time = time.perf_counter()
-
-        # Divide processing of audio into chunks
-        chunk_start = start_time
-
-        while (chunk_start < end_time):
-            chunk_duration = min(end_time - chunk_start, VAD_MAX_PROCESSING_CHUNK)
-
-            print("Processing VAD in chunk from {} to {}".format(format_timestamp(chunk_start), format_timestamp(chunk_start + chunk_duration)))
-            wav = self.get_audio_segment(audio, str(chunk_start), str(chunk_duration))
-
-            sample_timestamps = self.get_speech_timestamps(wav, self.model, sampling_rate=self.sampling_rate, threshold=SPEECH_TRESHOLD)
-            seconds_timestamps = self.multiply_timestamps(sample_timestamps, factor=1 / self.sampling_rate)
-            adjusted = self.adjust_timestamp(seconds_timestamps, adjust_seconds=chunk_start, max_source_time=chunk_start + chunk_duration)
-
-            result.extend(adjusted)
-            chunk_start += chunk_duration
-
-        perf_end_time = time.perf_counter()
-        print("VAD processing took {} seconds".format(perf_end_time - perf_start_time))
-
-        return result
-
-    def __getstate__(self):
-        # We only need the sampling rate
-        return { 'sampling_rate': self.sampling_rate }
-
-    def __setstate__(self, state):
-        self.sampling_rate = state['sampling_rate']
-        self.model = None
-        # Use the global cache
-        self.cache = GLOBAL_MODEL_CACHE
-        self._initialize_model()
-
-# A very simple VAD that just marks every N seconds as speech
-class VadPeriodicTranscription(AbstractTranscription):
-    def __init__(self, sampling_rate: int = 16000):
-        super().__init__(sampling_rate=sampling_rate)
-
-    def is_transcribe_timestamps_fast(self):
-        # This is a very fast VAD - no need to parallelize it
-        return True
-
-    def get_transcribe_timestamps(self, audio: str, config: PeriodicTranscriptionConfig, start_time: float, end_time: float):
-        result = []
-
-        # Generate a timestamp every N seconds
-        start_timestamp = start_time
-
-        while (start_timestamp < end_time):
-            end_timestamp = min(start_timestamp + config.periodic_duration, end_time)
-            segment_duration = end_timestamp - start_timestamp
-
-            # Minimum duration is 1 second
-            if (segment_duration >= 1):
-                result.append( { 'start': start_timestamp, 'end': end_timestamp } )
-
-            start_timestamp = end_timestamp
-
-        return result
-
-def get_audio_duration(file: str):
-    return float(ffmpeg.probe(file)["format"]["duration"])
-
-def load_audio(file: str, sample_rate: int = 16000,
-               start_time: str = None, duration: str = None):
-    """
-    Open an audio file and read as mono waveform, resampling as necessary
-
-    Parameters
-    ----------
-    file: str
-        The audio file to open
-
-    sample_rate: int
-        The sample rate to resample the audio to, if necessary
-
-    start_time: str
-        The start time, using the standard FFMPEG time duration syntax, or None to disable.
-
-    duration: str
-        The duration, using the standard FFMPEG time duration syntax, or None to disable.
-
-    Returns
-    -------
-    A NumPy array containing the audio waveform, in float32 dtype.
-    """
-    try:
-        inputArgs = {'threads': 0}
-
-        if (start_time is not None):
-            inputArgs['ss'] = start_time
-        if (duration is not None):
-            inputArgs['t'] = duration
-
-        # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
-        # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
-        out, _ = (
-            ffmpeg.input(file, **inputArgs)
-            .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sample_rate)
-            .run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True)
-        )
-    except ffmpeg.Error as e:
-        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}")
-
-    return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
\ No newline at end of file
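
Taken together, this module boils down to: decode audio with ffmpeg, run the Silero model over it, and convert sample offsets to seconds. A minimal standalone sketch of that pipeline (the file name is hypothetical; the torch.hub entry point is the same one _create_model uses):

    import torch

    model, utils = torch.hub.load(repo_or_dir="snakers4/silero-vad", model="silero_vad")
    (get_speech_timestamps, _, _, _, _) = utils

    wav = load_audio("speech.mp3", sample_rate=16000)  # float32 mono in [-1, 1]
    stamps = get_speech_timestamps(torch.from_numpy(wav), model,
                                   sampling_rate=16000, threshold=0.5)
    # Silero reports sample offsets; divide by the sampling rate for seconds
    print([(s['start'] / 16000, s['end'] / 16000) for s in stamps])
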
diff --git a/spaces/avirathtibrewala/YTToText/inference.py b/spaces/avirathtibrewala/YTToText/inference.py
deleted file mode 100644
index a284fb64f2e7e6764ef93fd266e14740c6060628..0000000000000000000000000000000000000000
--- a/spaces/avirathtibrewala/YTToText/inference.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import pywhisper
-import os
-import sys
-import pytube
-
-def downloadYTVideo(url):
-    youtubeVideo = pytube.YouTube(url)
-    audio = youtubeVideo.streams.filter(only_audio=True).first()
-    out_file = audio.download(output_path='.')
-    base, ext = os.path.splitext(out_file)
-    new_file = base + '.mp3'
-    os.rename(out_file, new_file)
-    return new_file
-
-def main(link, model):
-    try:
-        file = downloadYTVideo(link)
-    except Exception:
-        print("Link is broken. Please check it")
-        return
-    try:
-        whisper_model = pywhisper.load_model(model)
-        res = whisper_model.transcribe(file)
-        res = res['text']
-        os.remove(file)
-        return res
-    except Exception as e:
-        print(f"Transcription failed: {e}")
-        return None
-
-
-if __name__ == "__main__":
-    # Expects a YouTube URL and a whisper model name, e.g. "base"
-    main(sys.argv[1], sys.argv[2])
diff --git a/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/README.md b/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/README.md
deleted file mode 100644
index 027bd067a1470cdf2c573f540ab2035489363dad..0000000000000000000000000000000000000000
--- a/spaces/awacke1/ChatGPT-QA-Translation-Summary-14/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ChatGPT QA Translation Summary 14
-emoji: 🏢
-colorFrom: gray
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/MadLibs/gamestate.py b/spaces/awacke1/MadLibs/gamestate.py
deleted file mode 100644
index 7fddc8724e19e33f2f1048bf43c306775a4a6273..0000000000000000000000000000000000000000
--- a/spaces/awacke1/MadLibs/gamestate.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import TypeVar
-import dataclasses
-
-import streamlit as st
-
-StateT = TypeVar('StateT')
-
-def persistent_game_state(initial_state: StateT) -> StateT:
-    session_id = st.report_thread.get_report_ctx().session_id
-    session = st.server.server.Server.get_current()._get_session_info(session_id).session
-    if not hasattr(session, '_gamestate'):
-        setattr(session, '_gamestate', initial_state)
-    return session._gamestate
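
Note that gamestate.py reaches into Streamlit internals (st.report_thread, st.server) that were removed in later Streamlit releases. A sketch of the same pattern on the public API (st.session_state, available since Streamlit 0.84):

    import streamlit as st

    def persistent_game_state(initial_state):
        # st.session_state persists across reruns within a browser session
        if "gamestate" not in st.session_state:
            st.session_state["gamestate"] = initial_state
        return st.session_state["gamestate"]
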
"chair", "chimpanzee", "clock", "cloud", "cockroach", "couch", "cra", "crocodile", "cup", "dinosaur", "dolphin", "elephant", "flatfish", "forest", "fox", "girl", "hamster", "house", "kangaroo", "keyboard", "lamp", "lawn_mower", "leopard", "lion", "lizard", "lobster", "man", "maple_tree", "motorcycle", "mountain", "mouse", "mushroom", "oak_tree", "orange", "orchid", "otter", "palm_tree", "pear", "pickup_truck", "pine_tree", "plain", "plate", "poppy", "porcupine", "possum", "rabbit", "raccoon", "ray", "road", "rocket", "rose", "sea", "seal", "shark", "shrew", "skunk", "skyscraper", "snail", "snake", "spider", "squirrel", "streetcar", "sunflower", "sweet_pepper", "table", "tank", "telephone", "television", "tiger", "tractor", "train", "trout", "tulip", "turtle", "wardrobe", "whale", "willow_tree", "wolf", "woman", "worm"] - -def classify_image(image): - image = image.reshape((-1, 32, 32, 3)) - pred = model.predict(image) - prediction = softmax(pred)[0] - return {labels[i]: float(prediction[i]) for i in range(100)} - -image = gr.inputs.Image(shape=(32,32)) -label = gr.outputs.Label(num_top_classes=5) - -iface = gr.Interface(classify_image,image,label, - #outputs=[ - # gr.outputs.Textbox(label="Engine issue"), - # gr.outputs.Textbox(label="Engine issue score")], - examples=[["1.jpg"],["2.jpg"],["3.jpg"],["4.jpg"],["5.jpg"]], - title="Image classification on CIFAR-100", - description = "Model for classifying images from the CIFAR dataset using a vision transformer trained with small data.", - article = "Author: Jónathan Heras" -# examples = ["sample.csv"], -) - - -iface.launch() \ No newline at end of file diff --git a/spaces/beephids/paper-llm/test-repo.py b/spaces/beephids/paper-llm/test-repo.py deleted file mode 100644 index 61c20ad78e79f49978e08fdae05968f4088aa6a6..0000000000000000000000000000000000000000 --- a/spaces/beephids/paper-llm/test-repo.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import csv -import panel as pn -from datetime import datetime -import pandas as pd -# import huggingface_hub -from huggingface_hub import Repository - -DATASET_REPO_URL = "https://huggingface.co/datasets/julien-c/persistent-space-dataset" -DATA_FILENAME = "data.csv" -DATA_FILE = os.path.join("data", DATA_FILENAME) - -repo_dir = "data" -if not os.path.exists(repo_dir): - os.makedirs(repo_dir) - -repo = pn.widgets.TextInput(name="Repository URL", value=DATASET_REPO_URL) -name_input = pn.widgets.TextInput(name="Your name") -message_input = pn.widgets.TextAreaInput(name="Your message") - -def generate_html() -> str: - if not os.path.exists(DATA_FILE): - return "
diff --git a/spaces/beephids/paper-llm/test-repo.py b/spaces/beephids/paper-llm/test-repo.py
deleted file mode 100644
index 61c20ad78e79f49978e08fdae05968f4088aa6a6..0000000000000000000000000000000000000000
--- a/spaces/beephids/paper-llm/test-repo.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import os
-import csv
-import panel as pn
-from datetime import datetime
-import pandas as pd
-# import huggingface_hub
-from huggingface_hub import Repository
-
-DATASET_REPO_URL = "https://huggingface.co/datasets/julien-c/persistent-space-dataset"
-DATA_FILENAME = "data.csv"
-DATA_FILE = os.path.join("data", DATA_FILENAME)
-
-repo_dir = "data"
-if not os.path.exists(repo_dir):
-    os.makedirs(repo_dir)
-
-repo = pn.widgets.TextInput(name="Repository URL", value=DATASET_REPO_URL)
-name_input = pn.widgets.TextInput(name="Your name")
-message_input = pn.widgets.TextAreaInput(name="Your message")
-
-def generate_html() -> str:
-    if not os.path.exists(DATA_FILE):
-        return "<div class='chatbox'>no messages yet</div>"
-    else:
-        # The CSV is written without a header row, so name the columns explicitly
-        df = pd.read_csv(DATA_FILE, header=None, names=["name", "message", "time"])
-        df = df.iloc[::-1]  # Reverse the order of rows (newest first)
-        html = "<div class='chatbox'>"
-        for _, row in df.iterrows():
-            html += "<div>"
-            html += f"<span class='name'>{row['name']}</span>"
-            html += f"<span class='message'>{row['message']}</span>"
-            html += "</div>"
-        html += "</div>"
-        return html
      " - return html - -def store_message(event): - name = name_input.value - message = message_input.value - if name and message: - with open(DATA_FILE, "a", newline="") as csvfile: - writer = csv.writer(csvfile) - writer.writerow([name, message, str(datetime.now())]) - pn.state.session_context.request_relayout(repo) - pn.state.session_context.request_relayout(messages) - -repo.on_change(store_message, "value") -name_input.on_change(store_message, "value") -message_input.on_change(store_message, "value") - -repo_text = pn.pane.Markdown(f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL}) (open in new tab)", sizing_mode="stretch_width") -messages = pn.pane.HTML(sizing_mode="stretch_width") -messages.object = generate_html() - -template = pn.template.FastListTemplate( - site="Persistent Space Dataset", - title="Reading/Writing to a HuggingFace Dataset Repo from Spaces", - main=[ - pn.Column(repo_text, name_input, message_input, messages), - ], - header_background="#333", - header_color="white", - main_max_width="800px", - main_padding=20, - main_min_height="600px" -) -template.servable() \ No newline at end of file diff --git a/spaces/binker/interpreter/web_ui.py b/spaces/binker/interpreter/web_ui.py deleted file mode 100644 index c89172cf74a0a5887841990847c2ff104c5f7fe0..0000000000000000000000000000000000000000 --- a/spaces/binker/interpreter/web_ui.py +++ /dev/null @@ -1,185 +0,0 @@ -from response_parser import * -import gradio as gr - - -def initialization(state_dict: Dict) -> None: - if not os.path.exists('cache'): - os.mkdir('cache') - if state_dict["bot_backend"] is None: - state_dict["bot_backend"] = BotBackend() - if 'OPENAI_API_KEY' in os.environ: - del os.environ['OPENAI_API_KEY'] - - -def get_bot_backend(state_dict: Dict) -> BotBackend: - return state_dict["bot_backend"] - - -def switch_to_gpt4(state_dict: Dict, whether_switch: bool) -> None: - bot_backend = get_bot_backend(state_dict) - if whether_switch: - bot_backend.update_gpt_model_choice("GPT-4") - else: - bot_backend.update_gpt_model_choice("GPT-3.5") - - -def add_text(state_dict: Dict, history: List, text: str) -> Tuple[List, Dict]: - bot_backend = get_bot_backend(state_dict) - bot_backend.add_text_message(user_text=text) - - history = history + [(text, None)] - - return history, gr.update(value="", interactive=False) - - -def add_file(state_dict: Dict, history: List, file) -> List: - bot_backend = get_bot_backend(state_dict) - path = file.name - filename = os.path.basename(path) - - bot_msg = [f'📁[{filename}]', None] - history.append(bot_msg) - - bot_backend.add_file_message(path=path, bot_msg=bot_msg) - - return history - - -def undo_upload_file(state_dict: Dict, history: List) -> Tuple[List, Dict]: - bot_backend = get_bot_backend(state_dict) - bot_msg = bot_backend.revoke_file() - - if bot_msg is None: - return history, gr.Button.update(interactive=False) - - else: - assert history[-1] == bot_msg - del history[-1] - if bot_backend.revocable_files: - return history, gr.Button.update(interactive=True) - else: - return history, gr.Button.update(interactive=False) - - -def refresh_file_display(state_dict: Dict) -> List[str]: - bot_backend = get_bot_backend(state_dict) - work_dir = bot_backend.jupyter_work_dir - filenames = os.listdir(work_dir) - paths = [] - for filename in filenames: - paths.append( - os.path.join(work_dir, filename) - ) - return paths - - -def restart_ui(history: List) -> Tuple[List, Dict, Dict, Dict, Dict]: - history.clear() - return ( - history, - gr.Textbox.update(value="", 
interactive=False), - gr.Button.update(interactive=False), - gr.Button.update(interactive=False), - gr.Button.update(interactive=False) - ) - - -def restart_bot_backend(state_dict: Dict) -> None: - bot_backend = get_bot_backend(state_dict) - bot_backend.restart() - - -def bot(state_dict: Dict, history: List) -> List: - bot_backend = get_bot_backend(state_dict) - - while bot_backend.finish_reason in ('new_input', 'function_call'): - if history[-1][0] is None: - history.append( - [None, ""] - ) - else: - history[-1][1] = "" - - response = chat_completion(bot_backend=bot_backend) - for chunk in response: - history, weather_exit = parse_response( - chunk=chunk, - history=history, - bot_backend=bot_backend - ) - yield history - if weather_exit: - exit(-1) - - yield history - - -if __name__ == '__main__': - config = get_config() - with gr.Blocks(theme=gr.themes.Base()) as block: - """ - Reference: https://www.gradio.app/guides/creating-a-chatbot-fast - """ - # UI components - state = gr.State(value={"bot_backend": None}) - with gr.Tab("Chat"): - chatbot = gr.Chatbot([], elem_id="chatbot", label="Local Code Interpreter", height=750) - with gr.Row(): - with gr.Column(scale=0.85): - text_box = gr.Textbox( - show_label=False, - placeholder="Enter text and press enter, or upload a file", - container=False - ) - with gr.Column(scale=0.15, min_width=0): - file_upload_button = gr.UploadButton("📁", file_types=['file']) - with gr.Row(equal_height=True): - with gr.Column(scale=0.7): - check_box = gr.Checkbox(label="Use GPT-4", interactive=config['model']['GPT-4']['available']) - check_box.change(fn=switch_to_gpt4, inputs=[state, check_box]) - with gr.Column(scale=0.15, min_width=0): - restart_button = gr.Button(value='🔄 Restart') - with gr.Column(scale=0.15, min_width=0): - undo_file_button = gr.Button(value="↩️Undo upload file", interactive=False) - with gr.Tab("Files"): - file_output = gr.Files() - - # Components function binding - txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then( - bot, [state, chatbot], chatbot - ) - txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output]) - txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False) - txt_msg.then(lambda: gr.Button.update(interactive=False), None, [undo_file_button], queue=False) - - file_msg = file_upload_button.upload( - add_file, [state, chatbot, file_upload_button], [chatbot], queue=False - ).then( - bot, [state, chatbot], chatbot - ) - file_msg.then(lambda: gr.Button.update(interactive=True), None, [undo_file_button], queue=False) - file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output]) - - undo_file_button.click( - fn=undo_upload_file, inputs=[state, chatbot], outputs=[chatbot, undo_file_button] - ).then( - fn=refresh_file_display, inputs=[state], outputs=[file_output] - ) - - restart_button.click( - fn=restart_ui, inputs=[chatbot], - outputs=[chatbot, text_box, restart_button, file_upload_button, undo_file_button] - ).then( - fn=restart_bot_backend, inputs=[state], queue=False - ).then( - fn=refresh_file_display, inputs=[state], outputs=[file_output] - ).then( - fn=lambda: (gr.Textbox.update(interactive=True), gr.Button.update(interactive=True), - gr.Button.update(interactive=True)), - inputs=None, outputs=[text_box, restart_button, file_upload_button], queue=False - ) - - block.load(fn=initialization, inputs=[state]) - - block.queue() - block.launch(inbrowser=True) diff --git a/spaces/bioriAsaeru/text-to-voice/Free Download 
Auto Tune Evo Software Transform Your Vocals with the Best Auto-Tune Plugin Ever.md b/spaces/bioriAsaeru/text-to-voice/Free Download Auto Tune Evo Software Transform Your Vocals with the Best Auto-Tune Plugin Ever.md deleted file mode 100644 index 7671c807ed3f23cab86fb0bac0c2e4dfb11f814b..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Free Download Auto Tune Evo Software Transform Your Vocals with the Best Auto-Tune Plugin Ever.md +++ /dev/null @@ -1,12 +0,0 @@ -
-Its pitch correction module is efficient and easy to use, earning Graillon 2 the number one spot on the autotune freebie list. It is compatible with all digital audio workstations on Windows and macOS.
-This free autotune effect is flexible and easy to operate, with adjustable speed, range, scale, and depth. The added stereo widening feature can be helpful in a vocal processing chain, but make sure to double-check your mix in mono when using it.
-Free Download Auto Tune Evo Software
-DOWNLOAD: https://urloso.com/2uyQ0H
-Aside from those few drawbacks, MAutoPitch is a brilliant free autotune VST that could quickly become your go-to pitch correction tool. Just like Graillon 2, it is compatible with all VST and AU plugin hosts on PC and Mac.
-GSnap is an old freeware autotune plugin. It was the first free autotune VST on the market. Pitch correction software was still somewhat of a rarity back in the day when GSnap was released.
-Unlike Graillon 2 and MAutoPitch, GSnap will only work on Windows-based systems. It does come with a very well-written manual, though. The instructions are worth reading if you decide to use GSnap as your go-to free autotune effect.
-Although Voloco is available as a VST3 and AU plugin on desktop operating systems, it is primarily used on iOS and Android. The app version of Voloco is easily the best free autotune for mobile devices.
-Downloading from SoftCamel is always safe. We check every download offered on our website to make sure your information and device are protected. Additionally, our files are hosted on fast, reliable and efficient servers to make sure you achieve high and stable download speeds. On our website you will find a database of software, games and apps which you can access for free. We have never asked for a login or payment to download from our website, and we never will. This is why you can trust SoftCamel for all your download needs.
      \ No newline at end of file diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/activations.py b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/activations.py deleted file mode 100644 index 084ce8c41230dcde25f0c01311a4c0abcd4584e7..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/activations.py +++ /dev/null @@ -1,103 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Activation functions -""" - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class SiLU(nn.Module): - # SiLU activation https://arxiv.org/pdf/1606.08415.pdf - @staticmethod - def forward(x): - return x * torch.sigmoid(x) - - -class Hardswish(nn.Module): - # Hard-SiLU activation - @staticmethod - def forward(x): - # return x * F.hardsigmoid(x) # for TorchScript and CoreML - return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX - - -class Mish(nn.Module): - # Mish activation https://github.com/digantamisra98/Mish - @staticmethod - def forward(x): - return x * F.softplus(x).tanh() - - -class MemoryEfficientMish(nn.Module): - # Mish activation memory-efficient - class F(torch.autograd.Function): - - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) - fx = F.softplus(x).tanh() - return grad_output * (fx + x * sx * (1 - fx * fx)) - - def forward(self, x): - return self.F.apply(x) - - -class FReLU(nn.Module): - # FReLU activation https://arxiv.org/abs/2007.11824 - def __init__(self, c1, k=3): # ch_in, kernel - super().__init__() - self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) - self.bn = nn.BatchNorm2d(c1) - - def forward(self, x): - return torch.max(x, self.bn(self.conv(x))) - - -class AconC(nn.Module): - r""" ACON activation (activate or not) - AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter - according to "Activate or Not: Learning Customized Activation" . - """ - - def __init__(self, c1): - super().__init__() - self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) - - def forward(self, x): - dpx = (self.p1 - self.p2) * x - return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x - - -class MetaAconC(nn.Module): - r""" ACON activation (activate or not) - MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network - according to "Activate or Not: Learning Customized Activation" . 
- """ - - def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r - super().__init__() - c2 = max(r, c1 // r) - self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) - self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) - # self.bn1 = nn.BatchNorm2d(c2) - # self.bn2 = nn.BatchNorm2d(c1) - - def forward(self, x): - y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) - # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 - # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable - beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed - dpx = (self.p1 - self.p2) * x - return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/spaces/cactusfriend/nightmareprompts/README.md b/spaces/cactusfriend/nightmareprompts/README.md deleted file mode 100644 index ae38aa2557804b57d66734ec1f6410caabe5eb1f..0000000000000000000000000000000000000000 --- a/spaces/cactusfriend/nightmareprompts/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Nightmare InvokeAI Prompts -emoji: 🍝 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.26.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/carisackc/ClinicalNoteDemo/README.md b/spaces/carisackc/ClinicalNoteDemo/README.md deleted file mode 100644 index 4669ebc8f684251d232c7fbc9d6d1d20ab48644d..0000000000000000000000000000000000000000 --- a/spaces/carisackc/ClinicalNoteDemo/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Clinical -emoji: 💩 -colorFrom: blue -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: other -duplicated_from: carisackc/Clinical ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/roi_heads/box_head.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/roi_heads/box_head.py deleted file mode 100644 index 5d0370b0400d9268f13c905e4096a84ce42e9bfd..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/roi_heads/box_head.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -from typing import List -import fvcore.nn.weight_init as weight_init -import torch -from torch import nn - -from detectron2.config import configurable -from detectron2.layers import Conv2d, ShapeSpec, get_norm -from detectron2.utils.registry import Registry - -__all__ = ["FastRCNNConvFCHead", "build_box_head", "ROI_BOX_HEAD_REGISTRY"] - -ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD") -ROI_BOX_HEAD_REGISTRY.__doc__ = """ -Registry for box heads, which make box predictions from per-region features. - -The registered object will be called with `obj(cfg, input_shape)`. -""" - - -# To get torchscript support, we make the head a subclass of `nn.Sequential`. -# Therefore, to add new layers in this head class, please make sure they are -# added in the order they will be used in forward(). -@ROI_BOX_HEAD_REGISTRY.register() -class FastRCNNConvFCHead(nn.Sequential): - """ - A head with several 3x3 conv layers (each followed by norm & relu) and then - several fc layers (each followed by relu). 
- """ - - @configurable - def __init__( - self, input_shape: ShapeSpec, *, conv_dims: List[int], fc_dims: List[int], conv_norm="" - ): - """ - NOTE: this interface is experimental. - - Args: - input_shape (ShapeSpec): shape of the input feature. - conv_dims (list[int]): the output dimensions of the conv layers - fc_dims (list[int]): the output dimensions of the fc layers - conv_norm (str or callable): normalization for the conv layers. - See :func:`detectron2.layers.get_norm` for supported types. - """ - super().__init__() - assert len(conv_dims) + len(fc_dims) > 0 - - self._output_size = (input_shape.channels, input_shape.height, input_shape.width) - - self.conv_norm_relus = [] - for k, conv_dim in enumerate(conv_dims): - conv = Conv2d( - self._output_size[0], - conv_dim, - kernel_size=3, - padding=1, - bias=not conv_norm, - norm=get_norm(conv_norm, conv_dim), - activation=nn.ReLU(), - ) - self.add_module("conv{}".format(k + 1), conv) - self.conv_norm_relus.append(conv) - self._output_size = (conv_dim, self._output_size[1], self._output_size[2]) - - self.fcs = [] - for k, fc_dim in enumerate(fc_dims): - if k == 0: - self.add_module("flatten", nn.Flatten()) - fc = nn.Linear(int(np.prod(self._output_size)), fc_dim) - self.add_module("fc{}".format(k + 1), fc) - self.add_module("fc_relu{}".format(k + 1), nn.ReLU()) - self.fcs.append(fc) - self._output_size = fc_dim - - for layer in self.conv_norm_relus: - weight_init.c2_msra_fill(layer) - for layer in self.fcs: - weight_init.c2_xavier_fill(layer) - - @classmethod - def from_config(cls, cfg, input_shape): - num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV - conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM - num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC - fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM - return { - "input_shape": input_shape, - "conv_dims": [conv_dim] * num_conv, - "fc_dims": [fc_dim] * num_fc, - "conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM, - } - - def forward(self, x): - for layer in self: - x = layer(x) - return x - - @property - @torch.jit.unused - def output_shape(self): - """ - Returns: - ShapeSpec: the output feature shape - """ - o = self._output_size - if isinstance(o, int): - return ShapeSpec(channels=o) - else: - return ShapeSpec(channels=o[0], height=o[1], width=o[2]) - - -def build_box_head(cfg, input_shape): - """ - Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`. 
- """ - name = cfg.MODEL.ROI_BOX_HEAD.NAME - return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/spaces/ccds/vits_onnx/app/text/cleaners.py b/spaces/ccds/vits_onnx/app/text/cleaners.py deleted file mode 100644 index 657951af902d0884b1ecc110e2ea932c6903b50a..0000000000000000000000000000000000000000 --- a/spaces/ccds/vits_onnx/app/text/cleaners.py +++ /dev/null @@ -1,58 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - -pyopenjtalk._lazy_init() - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - - -def japanese_cleaners(text): - '''Pipeline for notating accent in Japanese text.''' - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', 'ʃ').replace('cl', 'Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - if re.match('[A-Za-z]', text[-1]): - text += '.' 
- return text - - -def japanese_cleaners2(text): - return japanese_cleaners(text).replace('ts','ʦ').replace('...','…') diff --git a/spaces/chansung/LLM-As-Chatbot/models/mpt.py b/spaces/chansung/LLM-As-Chatbot/models/mpt.py deleted file mode 100644 index 452c2e4cc30a9ee9ee0f1baf7e43e3d464fafdf7..0000000000000000000000000000000000000000 --- a/spaces/chansung/LLM-As-Chatbot/models/mpt.py +++ /dev/null @@ -1,55 +0,0 @@ -import torch -import global_vars -from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig -from optimum.bettertransformer import BetterTransformer - -def load_model( - base, - finetuned, - mode_cpu, - mode_mps, - mode_full_gpu, - mode_8bit, - mode_4bit, - force_download_ckpt -): - tokenizer = AutoTokenizer.from_pretrained(base, trust_remote_code=True) - tokenizer.padding_side = "left" - - if mode_cpu: - print("cpu mode") - model = AutoModelForCausalLM.from_pretrained( - base, - device_map={"": "cpu"}, - use_safetensors=False, - trust_remote_code=True, - ) - - elif mode_mps: - print("mps mode") - model = AutoModelForCausalLM.from_pretrained( - base, - device_map={"": "mps"}, - torch_dtype=torch.float16, - use_safetensors=False, - trust_remote_code=True - ) - - else: - print("gpu mode") - print(f"8bit = {mode_8bit}, 4bit = {mode_4bit}") - model = AutoModelForCausalLM.from_pretrained( - base, - load_in_8bit=mode_8bit, - load_in_4bit=mode_4bit, - device_map="auto", - trust_remote_code=True, - torch_dtype=torch.float16, - use_safetensors=False, - )#.to(global_vars.device) - - if not mode_8bit and not mode_4bit: - model.half() - - # model = BetterTransformer.transform(model) - return model, tokenizer \ No newline at end of file diff --git a/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/dataset_zoo/__init__.py b/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/dataset_zoo/__init__.py deleted file mode 100644 index 9a3a44ad27ac4a4e9b1cc84bce075329b47141cc..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/dataset_zoo/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -from .aro_datasets import VG_Relation, VG_Attribution, COCO_Order, Flickr30k_Order -from .retrieval import COCO_Retrieval, Flickr30k_Retrieval - - -def get_dataset(dataset_name, image_preprocess=None, text_perturb_fn=None, image_perturb_fn=None, download=False, *args, **kwargs): - """ - Helper function that returns a dataset object with an evaluation function. - dataset_name: Name of the dataset. - image_preprocess: Preprocessing function for images. - text_perturb_fn: A function that takes in a string and returns a string. This is for perturbation experiments. - image_perturb_fn: A function that takes in a PIL image and returns a PIL image. This is for perturbation experiments. - download: Whether to allow downloading images if they are not found. 
- """ - if dataset_name == "VG_Relation": - from .aro_datasets import get_visual_genome_relation - return get_visual_genome_relation(image_preprocess=image_preprocess, text_perturb_fn=text_perturb_fn, image_perturb_fn=image_perturb_fn, download=download, *args, **kwargs) - elif dataset_name == "VG_Attribution": - from .aro_datasets import get_visual_genome_attribution - return get_visual_genome_attribution(image_preprocess=image_preprocess, text_perturb_fn=text_perturb_fn, image_perturb_fn=image_perturb_fn, download=download, *args, **kwargs) - elif dataset_name == "COCO_Order": - from .aro_datasets import get_coco_order - return get_coco_order(image_preprocess=image_preprocess, text_perturb_fn=text_perturb_fn, image_perturb_fn=image_perturb_fn, download=download, *args, **kwargs) - elif dataset_name == "Flickr30k_Order": - from .aro_datasets import get_flickr30k_order - return get_flickr30k_order(image_preprocess=image_preprocess, text_perturb_fn=text_perturb_fn, image_perturb_fn=image_perturb_fn, download=download, *args, **kwargs) - elif dataset_name == "COCO_Retrieval": - from .retrieval import get_coco_retrieval - return get_coco_retrieval(image_preprocess=image_preprocess, text_perturb_fn=text_perturb_fn, image_perturb_fn=image_perturb_fn, download=download, *args, **kwargs) - elif dataset_name == "Flickr30k_Retrieval": - from .retrieval import get_flickr30k_retrieval - return get_flickr30k_retrieval(image_preprocess=image_preprocess, text_perturb_fn=text_perturb_fn, image_perturb_fn=image_perturb_fn, download=download, *args, **kwargs) - else: - raise ValueError(f"Unknown dataset {dataset_name}") diff --git a/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/evaluate.py b/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/evaluate.py deleted file mode 100644 index b4f3586649e669f423edeb803ac5eba5df283a9d..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/open_flamingo/eval/evaluate.py +++ /dev/null @@ -1,1435 +0,0 @@ -import argparse -import json -from math import ceil -import os -import random -import uuid -from collections import defaultdict -from typing import Callable -import time -import cv2 -import webdataset as wds -from sklearn.metrics import recall_score, average_precision_score - -import more_itertools -import numpy as np -import torch -from coco_metric import compute_cider, postprocess_captioning_generation -from eval_datasets import VQADataset -from tqdm import tqdm -from collections import Counter - -from vqa_metric import compute_vqa_accuracy, compute_gqa_accuracy -from open_flamingo.eval.classification import ( - compute_per_sample_probs, - compute_per_sample_loss, -) -from open_flamingo.eval.imagenet_utils import ( - openai_imagenet_classnames, - IMAGENET_1K_CLASS_ID_TO_LABEL, -) - -from open_flamingo.src.factory import create_model_and_transforms -from PIL import Image -from io import BytesIO -import base64 -from open_flamingo.train.distributed import init_distributed_device, world_info_from_env -import string -from open_flamingo.eval.task.reg import evaluate_reg -from open_flamingo.eval.task.gqa import GQADataset -from open_flamingo.eval.task.vl_checklist import evaluate_vlc -from open_flamingo.eval.task.crepe import evaluate_crepe -from open_flamingo.eval.task.caption import evaluate_coco_flickr -from open_flamingo.eval.task.utils import is_correct, get_iou -from open_flamingo.eval.task.cola import evaluate_cola -from open_flamingo.eval.task.gqa import evaluate_gqa - -def expand2square(pil_img, 
background_color): - width, height = pil_img.size - if width == height: - return pil_img - elif width > height: - result = Image.new(pil_img.mode, (width, width), background_color) - result.paste(pil_img, (0, (width - height) // 2)) - return result - else: - result = Image.new(pil_img.mode, (height, height), background_color) - result.paste(pil_img, ((height - width) // 2, 0)) - return result - -parser = argparse.ArgumentParser() -parser.add_argument("--lm_path", type=str, default="facebook/opt-1.3b") -parser.add_argument("--lm_tokenizer_path", type=str, default="facebook/opt-30b") -parser.add_argument("--vision_encoder_path", default="ViT-L-14", type=str) -parser.add_argument("--vision_encoder_pretrained", default="openai", type=str) -parser.add_argument("--checkpoint_path", type=str, required=True) -parser.add_argument( - "--results_file", type=str, default=None, help="JSON file to save results" -) - -# Trial arguments -parser.add_argument("--shots", nargs="+", default=[0, 4, 8, 16, 32], type=int) -parser.add_argument( - "--num_trials", - type=int, - default=1, - help="Number of trials to run for each shot using different demonstrations", -) -parser.add_argument( - "--trial_seeds", - nargs="+", - default=[0], - help="Seeds to use for each trial for picking demonstrations and eval sets", -) -parser.add_argument( - "--num_samples", type=int, default=5000, help="Number of samples to evaluate on" -) - -parser.add_argument("--batch_size", type=int, default=8) - -# Per-dataset evaluation flags -parser.add_argument( - "--eval_coco", - action="store_true", - default=False, - help="Whether to evaluate on COCO.", -) -parser.add_argument( - "--eval_vqav2", - action="store_true", - default=False, - help="Whether to evaluate on VQAV2.", -) -parser.add_argument( - "--eval_ok_vqa", - action="store_true", - default=False, - help="Whether to evaluate on OK-VQA.", -) -parser.add_argument( - "--eval_imagenet", - action="store_true", - default=False, - help="Whether to evaluate on ImageNet.", -) - -parser.add_argument( - "--eval_flickr30", - action="store_true", - default=False, - help="Whether to evaluate on Flickr30.", -) - -parser.add_argument( - "--eval_refcoco", - action="store_true", - default=False, - help="Whether to evaluate on RefCOCO.", -) - -# Dataset arguments - -## Flickr30 Dataset -parser.add_argument( - "--flickr_image_dir_path", - type=str, - help="Path to the flickr30/flickr30k_images directory.", - default=None, -) -parser.add_argument( - "--flickr_annotations_json_path", - type=str, - help="Path to the dataset_flickr30k_coco_style.json file.", - default=None, -) - -## COCO Dataset -parser.add_argument( - "--coco_image_dir_path", - type=str, - help="Path to the flickr30/flickr30k_images directory.", - default=None, -) -parser.add_argument( - "--coco_annotations_json_path", - type=str, - default=None, -) - -## VQAV2 Dataset -parser.add_argument( - "--vqav2_image_dir_path", - type=str, - default=None, -) -parser.add_argument( - "--vqav2_questions_json_path", - type=str, - default=None, -) -parser.add_argument( - "--vqav2_annotations_json_path", - type=str, - default=None, -) - -## OK-VQA Dataset -parser.add_argument( - "--ok_vqa_image_dir_path", - type=str, - help="Path to the vqav2/train2014 directory.", - default=None, -) -parser.add_argument( - "--ok_vqa_questions_json_path", - type=str, - help="Path to the v2_OpenEnded_mscoco_train2014_questions.json file.", - default=None, -) -parser.add_argument( - "--ok_vqa_annotations_json_path", - type=str, - help="Path to the 
v2_mscoco_train2014_annotations.json file.", - default=None, -) - -## Imagenet dataset -parser.add_argument("--imagenet_root", type=str, default="/tmp") - -## RefCOCO dataset -parser.add_argument("--refcoco_tsvfile", type=str, default=None) - -parser.add_argument( - "--location_token_num", - default=1000, - type=int, -) -# distributed training -parser.add_argument( - "--dist-url", - default="env://", - type=str, - help="url used to set up distributed training", -) -parser.add_argument( - "--dist-backend", default="nccl", type=str, help="distributed backend" -) -parser.add_argument( - "--horovod", - default=False, - action="store_true", - help="Use horovod for distributed training.", -) -parser.add_argument( - "--no-set-device-rank", - default=False, - action="store_true", - help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc).", -) -parser.add_argument( - "--dist", - default=False, - action="store_true", -) -parser.add_argument( - "--lora", - default=False, - action="store_true", -) -parser.add_argument( - "--lora_r", - default=16, - type=int, - required=False, -) -parser.add_argument( - "--legacy", - default=False, - action="store_true", -) -parser.add_argument( - "--special", - default=False, - action="store_true", -) -parser.add_argument( - "--id", - default=0, - type=int, - required=False, -) - -parser.add_argument( - "--eval_gqa", - default=False, - action="store_true", -) -parser.add_argument( - "--use_sam", - default=None, - type=str, - required=False, -) -parser.add_argument( - "--add_visual_token", - default=False, - action="store_true", -) -parser.add_argument( - "--use_format_v2", - default=False, - action="store_true", -) -parser.add_argument( - "--eval_aro", - default=False, - action="store_true", -) -parser.add_argument( - "--eval_pisc", - default=False, - action="store_true", -) -parser.add_argument( - "--eval_reg", - default=False, - action="store_true", -) -parser.add_argument( - "--eval_vlc", - default=False, - action="store_true", -) -parser.add_argument( - "--eval_crepe", - default=False, - action="store_true", -) -parser.add_argument( - "--eval_cola", - default=False, - action="store_true", -) -parser.add_argument( - "--level", - default=4, - type=int, -) -parser.add_argument( - "--type", - default="swap", - type=str, -) -parser.add_argument( - "--choose_left_right", - default=False, - action="store_true", -) - - -class OKVQAPostProcess(): - def __init__(self): - self._lemmatizer = None - - def _lemmatize(self, answers): - def apply(answer): - doc = self.lemmatizer(answer) - - words = [] - for token in doc: - if token.pos_ in ["NOUN", "VERB"]: - words.append(token.lemma_) - else: - words.append(token.text) - answer = " ".join(words) - - return answer - - return [apply(answer) for answer in answers] - - @property - def lemmatizer(self): - if self._lemmatizer is None: - try: - import spacy - - self._lemmatizer = spacy.load("en_core_web_sm") - except ImportError: - logging.error( - """ - Please install spacy and en_core_web_sm model to apply lemmatization. 
- python -m spacy download en_core_web_sm - OR - import spacy.cli - spacy.cli.download("en_core_web_sm") - """ - ) - exit(1) - - return self._lemmatizer - - -def main(): - args = parser.parse_args() - if args.dist: - args.local_rank, args.rank, args.world_size = world_info_from_env() - print(f"local_rank: {args.local_rank} rank: {args.rank} world_size: {args.world_size}") - device_id = init_distributed_device(args) - else: - args.rank = 0 - args.world_size = 1 - print(f"rank: {args.rank} world_size: {args.world_size}") - - if "sam" in args.checkpoint_path: - args.use_sam = "vit_l" - - args.add_visual_token = True - if "lora" in args.checkpoint_path: - args.lora = True - - - args.add_pe = False - args.add_box = True - args.relation = False - args.enhance_data = False - args.use_format_v2 = True - - - - import hashlib - args.id = hashlib.sha224(args.checkpoint_path.encode()).hexdigest() - - # load model - flamingo, image_processor, tokenizer, vis_embed_size = create_model_and_transforms( - args.vision_encoder_path, - args.vision_encoder_pretrained, - args.lm_path, - args.lm_tokenizer_path, - location_token_num=args.location_token_num, - lora=args.lora, - lora_r=16, - use_sam=args.use_sam, - add_visual_token=args.add_visual_token, - use_format_v2=args.use_format_v2, - add_box=args.add_box, - add_pe=args.add_pe, - add_relation=args.relation, - enhance_data=args.enhance_data, - ) - flamingo.use_format_v2 = args.use_format_v2 - if args.special: - flamingo.special = True - else: - flamingo.special = False - if args.legacy: - flamingo.legacy = True - print("use legacy evaluation") - flamingo.step_num = int(args.checkpoint_path.split("/")[-1].split(".")[0].split("_")[-1]) - flamingo.expr_name = args.checkpoint_path.split("/")[-2] - if args.rank == 0: - print("legacy", True if hasattr(flamingo, "legacy") else False) - print("step:", flamingo.step_num) - print("expr:", flamingo.expr_name) - print("use format v2:", flamingo.use_format_v2) - print(args) - checkpoint = torch.load(args.checkpoint_path, map_location="cpu") - model_state_dict = {} - for key in checkpoint["model_state_dict"].keys(): - model_state_dict[key.replace("module.", "")] = checkpoint["model_state_dict"][key] - if "vision_encoder.logit_scale"in model_state_dict: - # previous checkpoint has some unnecessary weights - del model_state_dict["vision_encoder.logit_scale"] - del model_state_dict["vision_encoder.visual.proj"] - del model_state_dict["vision_encoder.visual.ln_post.weight"] - del model_state_dict["vision_encoder.visual.ln_post.bias"] - flamingo.load_state_dict(model_state_dict, strict=True) - results = defaultdict(list) - if args.eval_coco: - print("Evaluating on COCO...") - cider_score = evaluate_coco_flickr( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - batch_size=args.batch_size, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - ) - results["coco"].append({"score": cider_score}) - - if args.eval_ok_vqa: - print("Evaluating on OK-VQA...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - ok_vqa_score = evaluate_vqa( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - batch_size=args.batch_size, - image_dir_path=args.ok_vqa_image_dir_path, - questions_json_path=args.ok_vqa_questions_json_path, - annotations_json_path=args.ok_vqa_annotations_json_path, - vqa_dataset="ok_vqa", - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - 
id=args.id, - ) - results["ok_vqa"].append( - {"shots": shot, "score": ok_vqa_score} - ) - - if args.eval_vqav2: - print("Evaluating on VQAv2...") - for shot in args.shots: - scores = [] - for seed, trial in zip(args.trial_seeds, range(args.num_trials)): - vqa_score = evaluate_vqa( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - batch_size=args.batch_size, - image_dir_path=args.vqav2_image_dir_path, - questions_json_path=args.vqav2_questions_json_path, - annotations_json_path=args.vqav2_annotations_json_path, - vqa_dataset="vqa", - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - ) - results["vqav2"].append( - {"shots": shot, "score": vqa_score} - ) - - if args.eval_gqa: - print("Evaluating on GQA...") - gqa_score = evaluate_gqa( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - batch_size=args.batch_size, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - ) - results["gqa"].append( - {"score": gqa_score} - ) - - if args.eval_refcoco: - print("Evaluating on RefCOCO...") - refcoco_score = evaluate_refcoco( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - batch_size=args.batch_size, - device=args.device, - tsvfile=args.refcoco_tsvfile, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - ) - results["refcoco"].append( - {"score": refcoco_score} - ) - if args.eval_aro: - print("Evaluating on ARO...") - aro_score = evaluate_aro( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - choose_left_right=args.choose_left_right, - ) - results["aro"].append( - {"score": aro_score} - ) - if args.eval_pisc: - print("Evaluating on ARO...") - aro_score = evaluate_pisc( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - batch_size=args.batch_size, - device=args.device, - tsvfile=args.refcoco_tsvfile, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - ) - results["pisc"].append( - {"score": aro_score} - ) - if args.eval_reg: - print("Evaluating on Referring Expression Generation...") - cider = evaluate_reg( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - ) - results["reg"].append( - {"score": cider} - ) - if args.eval_vlc: - print("Evaluating on VL-checklist...") - vlc_score = evaluate_vlc( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - ) - results["vlc"].append( - {"score": vlc_score} - ) - if args.eval_crepe: - print("Evaluating on CREPE...") - crepe_score = evaluate_crepe( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - level=args.level, - type=args.type, - ) - results["crepe"].append( - {"score": crepe_score} - ) - if args.eval_cola: - print("Evaluating on COLA...") - cola_score = evaluate_cola( - model=flamingo, - tokenizer=tokenizer, - image_processor=image_processor, - vis_embed_size=vis_embed_size, - rank=args.rank, - world_size=args.world_size, - id=args.id, - ) - results["cola"].append( - {"score": cola_score} - ) - -def 
prepare_batch_images(batch, image_processor): - batch_images = None - for b in batch: - b_image = image_processor(b["image"]).unsqueeze(0).unsqueeze(1).unsqueeze(0) - if batch_images is None: - batch_images = b_image - else: - batch_images = torch.cat([batch_images, b_image], dim=0) - return batch_images - -def get_outputs( - model, - batch_images, - attention_mask, - max_generation_length, - min_generation_length, - num_beams, - length_penalty, - input_ids, - image_start_index_list=None, - image_nums=None, - bad_words_ids=None, -): - with torch.inference_mode() and torch.cuda.amp.autocast(dtype=torch.float16): - outputs = model.generate( - batch_images, - input_ids, - attention_mask=attention_mask, - max_new_tokens=max_generation_length, - min_length=min_generation_length, - num_beams=num_beams, - length_penalty=length_penalty, - image_start_index_list=image_start_index_list, - image_nums=image_nums, - bad_words_ids=bad_words_ids, - ) - - outputs = outputs[:, len(input_ids[0]) :] - return outputs - - -def evaluate_vqa( - model, - tokenizer, - image_processor, - batch_size, - image_dir_path=None, - questions_json_path=None, - annotations_json_path=None, - vqa_dataset="vqa", - vis_embed_size=None, - rank=0, - world_size=1, - id=0, -): - """ - Evaluate a model on VQA datasets. Currently supports VQA v2.0. - - Args: - model (nn.Module): model to evaluate - tokenizer (transformers.PreTrainedTokenizer): tokenizer for the model - image_processor : image processor for the model - batch_size (int): batch size - image_dir_path (str): path to image directory - questions_json_path (str): path to questions json file - annotations_json_path (str): path to annotations json file - seed (int, optional): random seed. Defaults to 42. - max_generation_length (int, optional): max generation length. Defaults to 5. - num_beams (int, optional): number of beams to use for beam search. Defaults to 3. - length_penalty (float, optional): length penalty for beam search. Defaults to -2.0. - num_samples (int, optional): number of samples to evaluate on. Defaults to 5000 samples. - query_set_size (int, optional): size of the query set. Defaults to 2048. - num_shots (int, optional): number of shots to use. Defaults to 8. - device (int, optional): device to use. Defaults to -1 (cpu). - num_workers (int, optional): number of workers to use. Defaults to 4. - vqa_dataset (string): type of vqa dataset: currently supports vqa, ok_vqa. Defaults to vqa. 
- Returns: - float: accuracy score - """ - if world_size > 1: - torch.distributed.barrier() - if vqa_dataset == "gqa": - eval_dataset = GQADataset() - else: - eval_dataset = VQADataset( - image_dir_path=image_dir_path, - question_path=questions_json_path, - annotations_path=annotations_json_path, - vqa_dataset=vqa_dataset, - ) - postprocessor = OKVQAPostProcess() - try: - media_token_id = tokenizer("<|#image#|>", add_special_tokens=False)["input_ids"][-1] - endofmedia_token_id = tokenizer("<|#endofimage#|>", add_special_tokens=False)["input_ids"][-1] - pad_token_id = tokenizer(tokenizer.pad_token, add_special_tokens=False)["input_ids"][-1] - bos_token_id = tokenizer(tokenizer.bos_token, add_special_tokens=False)["input_ids"][-1] - except: - pass - def get_prompt(sample): - return f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>Question: {sample['question'].strip()} Short answer:" - # return f"<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>" - - model.eval().cuda() - lang_encoder_name = model.lang_encoder.__class__.__name__.lower() - if "peft" in lang_encoder_name: - lang_encoder_name = model.lang_encoder.base_model.model.__class__.__name__.lower() - predictions = [] - tokenizer.padding_side = "left" - if world_size > 1: - torch.distributed.barrier() - this_tot = 0 - for ii, batch in enumerate(more_itertools.chunked( - tqdm(eval_dataset, desc="Running inference", disable=(rank != 0)), batch_size - )): - if ii % world_size != rank: - continue - batch_images = prepare_batch_images( - batch=batch, - image_processor=image_processor, - ).cuda() - batch_text = [get_prompt(s) for s in batch] - encodings = tokenizer( - batch_text, - return_tensors="pt", - padding="longest", - truncation=True, - max_length=2000, - ) - input_ids = encodings["input_ids"].cuda() - attention_mask = encodings["attention_mask"].cuda() - skip_special_tokens = True - if hasattr(model, "legacy") and model.legacy and "opt" in lang_encoder_name: - if rank == 0: - tqdm.write("use legacy model") - for i in range(len(input_ids)): - media_token_index = (input_ids[i] == media_token_id).nonzero()[0,0] - endofmedia_token_index = (input_ids[i] == endofmedia_token_id).nonzero()[0,0] - input_ids[i, media_token_index - 1] = media_token_id - input_ids[i, media_token_index] = pad_token_id - input_ids[i, endofmedia_token_index - 1] = endofmedia_token_id - input_ids[i, endofmedia_token_index] = bos_token_id - image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - if "llama" in lang_encoder_name: - attention_mask[input_ids == 0] = 0 - outputs = get_outputs( - model=model, - batch_images=batch_images, - attention_mask=attention_mask, - max_generation_length=10, - min_generation_length=1, - num_beams=5, - length_penalty=0, - input_ids=input_ids, - image_start_index_list=image_start_index_list, - image_nums=image_nums, - ) - # postprocess begin - new_predictions = [ - out.strip().lower().strip(string.punctuation+" ") for out in tokenizer.batch_decode(outputs, skip_special_tokens=skip_special_tokens) - ] - if vqa_dataset == "ok_vqa": - new_predictions = postprocessor._lemmatize(new_predictions) - if model.special: - for i in range(len(new_predictions)): - for answer, _ in Counter(batch[i]['answers']).most_common(): - if answer in new_predictions[i]: - new_predictions[i] = answer - break - if "cant" in new_predictions[i] and "no" == answer: - 
new_predictions[i] = answer - break - if "can" in new_predictions[i] and "not" not in new_predictions[i] and "cant" not in new_predictions[i] and "yes" == answer: - new_predictions[i] = answer - break - - this_tot += 1 - if rank == 0 and this_tot % 20 == 0: - for i in range(1): - tqdm.write("model output: " + new_predictions[i]) - - predictions.extend( - [ - {"answer": p, "question_id": sample["question_id"], "_question": sample["question"], "answers": sample["answers"]} - for p, sample in zip(new_predictions, batch) - ] - ) - with open(f"{vqa_dataset}_{lang_encoder_name}_results_part{rank}_{id}.json", "w") as f: - f.write(json.dumps(predictions)) - print("save to", f"{vqa_dataset}_{lang_encoder_name}_results_part{rank}_{id}.json") - - time.sleep(10) - if world_size > 1: - torch.distributed.barrier() - if rank == 0: - print(f"evaluate on rank {rank}. world size is {world_size}") - predictions = [] - for rank_i in range(world_size): - print("load", f"{vqa_dataset}_{lang_encoder_name}_results_part{rank_i}_{id}.json") - predictions.extend(json.load(open(f"{vqa_dataset}_{lang_encoder_name}_results_part{rank_i}_{id}.json"))) - os.remove(f"{vqa_dataset}_{lang_encoder_name}_results_part{rank_i}_{id}.json") - print("num:", len(predictions)) - # save the predictions to a temporary file - random_uuid = str(uuid.uuid4()) - with open(f"{vqa_dataset}results_{random_uuid}.json", "w") as f: - f.write(json.dumps(predictions, indent=4)) - - if vqa_dataset == "gqa": - acc = compute_gqa_accuracy(predictions) - else: - acc = compute_vqa_accuracy( - f"{vqa_dataset}results_{random_uuid}.json", - questions_json_path, - annotations_json_path, - vqa_dataset=vqa_dataset, - ) - print(vqa_dataset, "score:", acc, "| save to", f"{vqa_dataset}results_{random_uuid}.json") - os.makedirs("eval_results", exist_ok=True) - with open(os.path.join("eval_results", f"{vqa_dataset}_{model.expr_name}_{model.step_num}_{int(time.time())}_{acc}"), "w") as f: - f.write(json.dumps(predictions, indent=2)) - - # delete the temporary file - os.remove(f"{vqa_dataset}results_{random_uuid}.json") - else: - time.sleep(5) - acc = 0.0 - if world_size > 1: - torch.distributed.barrier() - return acc - - -def evaluate_refcoco( - model, - tokenizer, - image_processor, - batch_size, - tsvfile, - max_generation_length=20, - num_beams=3, - length_penalty=-2.0, - device=-1, - vis_embed_size=None, - rank=0, - world_size=1, - id=0, -): - model.eval().cuda() - loc_token_ids = [] - for i in range(1000): - loc_token_ids.append(int(tokenizer(f"", add_special_tokens=False)["input_ids"][-1])) - media_token_id = tokenizer("<|#image#|>", add_special_tokens=False)["input_ids"][-1] - endofmedia_token_id = tokenizer("<|#endofimage#|>", add_special_tokens=False)["input_ids"][-1] - pad_token_id = tokenizer(tokenizer.pad_token, add_special_tokens=False)["input_ids"][-1] - bos_token_id = tokenizer(tokenizer.bos_token, add_special_tokens=False)["input_ids"][-1] - prebox_token_id = tokenizer("<|#prebox#|>", add_special_tokens=False)["input_ids"][-1] - object_token_id = tokenizer("<|#object#|>", add_special_tokens=False)["input_ids"][-1] - # all_ids = set(range(model.lang_encoder.lm_head.out_features)) - # bad_words_ids = list(all_ids - set(loc_token_ids)) - # bad_words_ids = [[b] for b in bad_words_ids] - # min_loc_token_id = min(loc_token_ids) - # max_loc_token_id = max(loc_token_ids) - total = 0 - correct = 0 - ious = [] - if "refcocog" in tsvfile: - dataset_name = "refcocog" - elif "refcocoplus" in tsvfile: - dataset_name = "refcocoplus" - else: - dataset_name = 
"refcoco" - with open(tsvfile, "r") as f: - lines = f.readlines() - pbar = tqdm(lines, disable=(rank != 0)) - for ii, line in enumerate(pbar): - if ii % world_size != rank: - continue - total += 1 - line = line.rstrip() - uniq_id, image_id, text, region_coord, image = line.split("\t") - - image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert("RGB") - # image = Image.open("/gpfs/u/home/LMCG/LMCGljnn/scratch/code/multimodal2/yolo.png").convert("RGB") - # image = Image.open("/gpfs/u/home/LMCG/LMCGljnn/scratch/code/multimodal/temp/cat.png").convert("RGB") - # image = Image.open("/gpfs/u/home/LMCG/LMCGljnn/scratch/code/multimodal/temp/262148000.png") - - gt_box = np.array(list(map(float, region_coord.split(",")))) - width = image.width - height = image.height - image = image.resize((224, 224)) - gt_box = gt_box / np.array([width, height, width, height]) * 224 - batch_images = image_processor(image).unsqueeze(0).unsqueeze(1).unsqueeze(0) - text = text.rstrip('.').strip().replace('"', '').capitalize() - prompt = [f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|><|#object#|>{text}<|#endofobject#|><|#visual#|>"] - # prompt = [f"<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>the cat<|#visual#|>"] - # prompt = [f"<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>"] - # prompt = [f"<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>a man<|#visual#|> is doing a trick on a skateboard<|#visual#|>"] - - encodings = tokenizer( - prompt, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - input_ids = encodings["input_ids"] - attention_mask = encodings["attention_mask"] - # attention_mask[input_ids == prebox_token_id] = 0 - image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - vision_x = batch_images.cuda() - lang_x = input_ids.cuda() - attention_mask = attention_mask.cuda() - - model.debug_id = 0 - with torch.inference_mode() and torch.cuda.amp.autocast(dtype=torch.float16): - outputs = model( - vision_x=vision_x, - lang_x=lang_x, - attention_mask=attention_mask, - labels=None, - image_nums=image_nums, - image_start_index_list=image_start_index_list, - added_bbox_list=None, - add_box=False, - ) - boxes = outputs["boxes"] - scores = outputs["scores"] - boxes = boxes[scores >= scores[0]*0.5] - scores = scores[scores >= scores[0]*0.5] - - text = text.lower().strip() - if text.split(" ")[0] not in ["a", "an", "the", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "several", "some"]: - text = "a " + text - losses = [] - for box, score in zip(boxes, scores): - this_prompt = [f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>There is<|#object#|><|#previsual#|><|#prebox#|><|#object#|> {text}"] - encodings = tokenizer( - this_prompt, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - input_ids = encodings["input_ids"] - attention_mask = encodings["attention_mask"] - image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - vision_x = batch_images.cuda() - lang_x = input_ids.cuda() - attention_mask = attention_mask.cuda() - added_bbox_list = [torch.tensor(box / 224).cuda().unsqueeze(0).clamp(0, 0.99)] - 
labels = lang_x.clone() - start_idx = (lang_x == object_token_id).nonzero()[-1, -1] - labels[0, :start_idx+1] = -100 - with torch.inference_mode() and torch.cuda.amp.autocast(dtype=torch.float16): - outputs = model( - vision_x=vision_x, - lang_x=lang_x, - attention_mask=attention_mask, - labels=labels, - image_nums=image_nums, - image_start_index_list=image_start_index_list, - added_bbox_list=added_bbox_list, - add_box=True, - ) - # print(tokenizer.decode(outputs.logits[0, start_idx].sort(descending=True).indices[:10])) - loss = outputs.loss.detach().cpu() - losses.append((loss.sum() / (loss != 0).sum()).item()) - chosen_idx = np.array(losses).argmin() - pred_box = boxes[chosen_idx] - if chosen_idx != 0: - tqdm.write(f"{text}|{chosen_idx}|{scores[chosen_idx]}") - iou = get_iou(pred_box, gt_box) - if iou >= 0.5: - correct += 1 - # else: - # if rank == 0: - # tqdm.write(text.rstrip('.').strip().lower()) - # open_cv_image = np.array(image) - # # Convert RGB to BGR - # open_cv_image = open_cv_image[:, :, ::-1].copy() - # open_cv_image = cv2.rectangle(open_cv_image, box[:2].astype(int), box[2:].astype(int), (255, 0, 0), 2) - # open_cv_image = cv2.rectangle(open_cv_image, gt_box[:2].astype(int), gt_box[2:].astype(int), (0, 255, 0), 2) - # cv2.imwrite(f"refcocog_result/{ii}_{iou}_{text}.jpg", open_cv_image) - pbar.set_description(f"iou: {iou:.2f} score: {correct / total:.4f}") - # open_cv_image = np.array(image) - # # Convert RGB to BGR - # open_cv_image = open_cv_image[:, :, ::-1].copy() - # for box, score in zip(boxes, scores): - # open_cv_image = cv2.rectangle(open_cv_image, box[:2].astype(int), box[2:].astype(int), (255, 0, 0), 2) - # cv2.imwrite("output.jpg", open_cv_image) - # print(boxes) - # print(scores) - # exit() - - - with open(f"{dataset_name}_results_part{rank}_{id}.json", "w") as f: - f.write(json.dumps([total, correct])) - if world_size > 1: - torch.distributed.barrier() - if rank == 0: - total = 0 - correct = 0 - print(f"evaluate on rank {rank}. 
world size is {world_size}") - for rank_i in range(world_size): - [total_part, correct_part] = json.load(open(f"{dataset_name}_results_part{rank_i}_{id}.json")) - os.remove(f"{dataset_name}_results_part{rank_i}_{id}.json") - total += total_part - correct += correct_part - score = correct / total - print("score:", score) - with open(os.path.join("eval_results", f"{dataset_name}_{model.expr_name}_{model.step_num}_{int(time.time())}_{score}"), "w") as f: - pass - else: - score = 0.0 - if world_size > 1: - torch.distributed.barrier() - return score - - - -# def preprocess_visual_info(Text): -# text = Text.split(" ") -# for is_idx, t in enumerate(text): -# if t == "is": -# break -# the_idx = is_idx -# while text[the_idx] != "the": -# the_idx -= 1 -# obj_A = " ".join(text[the_idx+1:is_idx]) -# second_the_idx = len(text) - 1 -# while text[second_the_idx] != "the": -# second_the_idx -= 1 -# obj_B = " ".join(text[second_the_idx+1:]) -# visual_obj_A = f"<|#object#|>{obj_A}<|#endofobject#|><|#visual#|><|#box#|><|#endofattr#|>" -# visual_obj_B = f"<|#object#|>{obj_B}<|#endofobject#|><|#visual#|><|#box#|><|#endofattr#|>" -# Text = Text.replace(obj_A, f"<|#object#|>{obj_A}<|#endofobject#|><|#visual#|><|#box#|><|#endofattr#|>") -# Text = Text.replace(obj_B, f"<|#object#|>{obj_B}<|#endofobject#|><|#visual#|><|#box#|><|#endofattr#|>") -# return Text, obj_A, obj_B, visual_obj_A, visual_obj_B - - -def preprocess_visual_info(Text): - text = Text.split(" ") - for is_idx, t in enumerate(text): - if t == "is": - break - the_idx = is_idx - while text[the_idx] != "the": - the_idx -= 1 - obj_A = " ".join(text[the_idx+1:is_idx]) - second_the_idx = len(text) - 1 - while text[second_the_idx] != "the": - second_the_idx -= 1 - obj_B = " ".join(text[second_the_idx+1:]) - relation = " ".join(text[is_idx+1:second_the_idx]) - visual_obj_A = f"<|#object#|>the {obj_A}<|#endofobject#|><|#visual#|><|#box#|><|#endofobject#|>" - visual_obj_B = f"<|#object#|><|#previsual#|><|#prebox#|><|#object#|>the {obj_B}<|#endofobject#|>" - Text = f"{visual_obj_A} is {relation} {visual_obj_B}" - return Text, obj_A, visual_obj_A, obj_B, visual_obj_B, relation - - - - -def get_bbox(visual_box_list, batch_images, prompt, model, tokenizer, media_token_id, prebox_token_id, debug=False, return_all=False): - assert isinstance(prompt, list) and len(prompt) == 1 and isinstance(prompt[0], str) - encodings = tokenizer( - prompt, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - input_ids = encodings["input_ids"] - attention_mask = encodings["attention_mask"] - image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - vision_x = batch_images.cuda() - lang_x = input_ids.cuda() - attention_mask = attention_mask.cuda() - - model.debug_id = 0 - with torch.inference_mode() and torch.cuda.amp.autocast(dtype=torch.float16): - outputs = model( - vision_x=vision_x, - lang_x=lang_x, - attention_mask=attention_mask, - labels=None, - image_nums=image_nums, - image_start_index_list=image_start_index_list, - added_bbox_list=visual_box_list, - add_box=visual_box_list is not None, - relations=None, - debug_mode=False, - ) - boxes = outputs["boxes"] - scores = outputs["scores"] - if debug: - import pdb; pdb.set_trace() - if return_all: - return boxes, scores - if len(scores) == 0: - return None, None - else: - return boxes[scores.argmax()], scores.max() - - -def evaluate_aro( - model, - 
tokenizer, - image_processor, - vis_embed_size=None, - rank=0, - world_size=1, - id=0, - add_visual=True, - subset=False, - choose_left_right=False, -): - # os.makedirs(f"visualization/aro_results_{id}", exist_ok=True) - dataset_name = "aro" - media_token_id = tokenizer("<|#image#|>", add_special_tokens=False)["input_ids"][-1] - box_token_id = tokenizer("<|#box#|>", add_special_tokens=False)["input_ids"][-1] - endofobject_token_id = tokenizer("<|#endofobject#|>", add_special_tokens=False)["input_ids"][-1] - endofattr_token_id = tokenizer("<|#endofattr#|>", add_special_tokens=False)["input_ids"][-1] - endofmedia_token_id = tokenizer("<|#endofimage#|>", add_special_tokens=False)["input_ids"][-1] - visual_token_id = tokenizer("<|#visual#|>", add_special_tokens=False)["input_ids"][-1] - previsual_token_id = tokenizer("<|#previsual#|>", add_special_tokens=False)["input_ids"][-1] - prebox_token_id = tokenizer("<|#prebox#|>", add_special_tokens=False)["input_ids"][-1] - model.eval().cuda() - total = 0 - n_top1 = 0 - n_top5 = 0 - from open_flamingo.eval.dataset_zoo import VG_Relation, VG_Attribution - vgr_dataset = VG_Relation(image_preprocess=None, download=True, root_dir="/gpfs/u/home/LMCG/LMCGljnn/scratch/code/vision-language-models-are-bows/data") - if subset: - subset_idx = json.load(open("aro_subset.json")) - pbar = tqdm(subset_idx, disable=(rank != 0)) - else: - pbar = tqdm(vgr_dataset, disable=(rank != 0)) - for ii, sample in enumerate(pbar): - if subset: - ORI_IDX = int(sample) - sample = vgr_dataset[sample] - if ii % world_size != rank: - continue - image = sample["image_options"][0] - # image = Image.open("/gpfs/u/home/LMCG/LMCGljnn/scratch/code/multimodal2/yolo.png").convert("RGB") - image = image.resize((224, 224)) - - text = sample["caption_options"][1] # 1 is true caption - # text = "the dog is sitting on the floor" if idx == 1 else "the floor is sitting on the dog" - batch_images = image_processor(image).unsqueeze(0).unsqueeze(1).unsqueeze(0) - text, obj_A, visual_obj_A, obj_B, visual_obj_B, relation = preprocess_visual_info(text) - - - first_text = f"<|#object#|>the {obj_A}<|#endofobject#|><|#visual#|>" - prompt = [f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>{first_text}"] - first_box, first_score = get_bbox(None, batch_images, prompt, model, tokenizer, media_token_id, prebox_token_id, return_all=False) - - if first_box is None: - text_A = "the " + obj_A - added_bbox_list = None - else: - text_A = visual_obj_A - added_bbox_list = [torch.tensor(first_box).unsqueeze(0).cuda() / 224] - - prompt = [f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>{text_A} is {relation}<|#object#|><|#previsual#|>"] - pre_boxes, pre_scores = get_bbox(added_bbox_list, batch_images, prompt, model, tokenizer, media_token_id, - prebox_token_id, return_all=True) - - if pre_boxes is None: - pre_boxes = [np.array([0.0, 0.0, 223.0, 223.0])] - pre_scores = [1.0] - - logits_list = [] - # pre_boxes = [pre_boxes[0]] - # pre_scores = [pre_scores[0]] - for pre_box, pre_score in zip(pre_boxes, pre_scores): - prompt = [f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>{text_A} is {relation}<|#object#|><|#previsual#|><|#prebox#|><|#object#|> the {obj_B}<|#endofobject#|>"] - - encodings = tokenizer( - prompt, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=512, - ) - input_ids = encodings["input_ids"] - attention_mask = encodings["attention_mask"] - image_start_index_list = 
((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - vision_x = batch_images.cuda() - lang_x = input_ids.cuda() - attention_mask = attention_mask.cuda() - labels = lang_x.clone() - added_bbox_list = None - if add_visual: - added_bbox_list = [] - if first_box is not None: - added_bbox_list.append(torch.tensor(first_box).unsqueeze(0).cuda().float() / 224) - if pre_box is not None: - added_bbox_list.append(torch.tensor(pre_box).unsqueeze(0).cuda().float() / 224) - if added_bbox_list is not None and len(added_bbox_list) == 0: - added_bbox_list = None - - with torch.cuda.amp.autocast(dtype=torch.float16) and torch.no_grad(): - outputs = model( - vision_x=vision_x, - lang_x=lang_x, - attention_mask=attention_mask, - labels=labels, - image_nums=image_nums, - image_start_index_list=image_start_index_list, - added_bbox_list=added_bbox_list, - add_box=added_bbox_list is not None, - relations=None, - ) - logits_list.append([pre_score, outputs.logits]) - pre_scores = np.array([x[0] for x in logits_list]) - final_probs = 0.0 - for score, (_, logits) in zip(pre_scores, logits_list): - final_probs += score * logits.softmax(-1) - assert input_ids.shape[:2] == final_probs.shape[:2] - _rank, is_top1, is_top5 = is_correct(input_ids, final_probs, tokenizer, obj_B, topk=5) - if is_top1: - n_top1 += 1 - if is_top5: - n_top5 += 1 - total += 1 - pbar.set_description(f"acc@top1: {n_top1 / total:.4f} | acc@top5: {n_top5 / total:.4f} | {_rank}") - - - with open(f"{dataset_name}_results_part{rank}_{id}.json", "w") as f: - f.write(json.dumps([total, n_top1, n_top5])) - if world_size > 1: - torch.distributed.barrier() - if rank == 0: - total = 0 - n_top1 = 0 - n_top5 = 0 - print(f"evaluate on rank {rank}. 
world size is {world_size}") - for rank_i in range(world_size): - [total_part, n_top1_part, n_top5_part] = json.load(open(f"{dataset_name}_results_part{rank_i}_{id}.json")) - os.remove(f"{dataset_name}_results_part{rank_i}_{id}.json") - total += total_part - n_top1 += n_top1_part - n_top5 += n_top5_part - acc_top1 = n_top1 / total - acc_top5 = n_top5 / total - print("acc_top1:", acc_top1, "acc_top5:", acc_top5, "total:", total) - with open(os.path.join("eval_results", f"{dataset_name}_{model.expr_name}_{model.step_num}_{int(time.time())}_{acc_top1}_{acc_top5}_{total}_{subset}"), "w") as f: - pass - else: - score = 0.0 - if world_size > 1: - torch.distributed.barrier() - return score - - -def evaluate_pisc( - model, - tokenizer, - image_processor, - batch_size, - tsvfile, - max_generation_length=20, - num_beams=3, - length_penalty=-2.0, - device=-1, - vis_embed_size=None, - rank=0, - world_size=1, - id=0, - add_visual=True, -): - from open_flamingo.train.instruction_template import PISC_TEMPLATES - dataset_name = "pisc" - media_token_id = tokenizer("<|#image#|>", add_special_tokens=False)["input_ids"][-1] - box_token_id = tokenizer("<|#box#|>", add_special_tokens=False)["input_ids"][-1] - endofobject_token_id = tokenizer("<|#endofobject#|>", add_special_tokens=False)["input_ids"][-1] - endofattr_token_id = tokenizer("<|#endofattr#|>", add_special_tokens=False)["input_ids"][-1] - endofmedia_token_id = tokenizer("<|#endofimage#|>", add_special_tokens=False)["input_ids"][-1] - visual_token_id = tokenizer("<|#visual#|>", add_special_tokens=False)["input_ids"][-1] - model.train().cuda() - - dataset = wds.WebDataset("/gpfs/u/home/LMCG/LMCGljnn/scratch-shared/junyan/raw/instruct/eval/pisc/000000.tar").decode().to_tuple("image_path.txt", "dataset.txt", "data.pyd") - pbar = tqdm(dataset, disable=(rank != 0)) - - rel_id_to_type = ["friends", "family", "couple", "professional", "commercial", "no relation"] - rel_type_to_id = {x: i for i, x in enumerate(rel_id_to_type)} - gt = [] - pred_scores = [] - for III, sample in enumerate(pbar): - if III % world_size != rank: - continue - image_path, dataset, data = sample - image = Image.open(image_path) - size = image_processor.transforms[0].size - image = image.resize((size, size)) - batch_images = image_processor(image).unsqueeze(0).unsqueeze(1).unsqueeze(0) - boxA = data[0] - boxB = data[1] - gt_relation = data[2] - losses = [] - for i_rel, option_rel in enumerate(rel_id_to_type): - text = PISC_TEMPLATES[0].format(relation=option_rel) - added_bbox = [ - torch.tensor([boxA]).cuda(), - torch.tensor([boxB]).cuda(), - ] - caption = f"{tokenizer.bos_token}<|#image#|>{tokenizer.pad_token*vis_embed_size}<|#endofimage#|>{text}{tokenizer.eos_token}" - encodings = tokenizer( - caption, - padding="longest", - truncation=True, - return_tensors="pt", - max_length=2000, - ) - input_ids = encodings["input_ids"] - attention_mask = encodings["attention_mask"] - image_start_index_list = ((input_ids == media_token_id).nonzero(as_tuple=True)[-1] + 1).tolist() - image_start_index_list = [[x] for x in image_start_index_list] - image_nums = [1] * len(input_ids) - vision_x = batch_images.cuda() - lang_x = input_ids.cuda() - attention_mask = attention_mask.cuda() - - labels = lang_x.clone() - labels[labels == tokenizer.pad_token_id] = -100 - if add_visual: - # endofattr_next_token_index = list((labels == endofattr_token_id).nonzero(as_tuple=True)) - # endofattr_next_token_index[1] += 1 - # endofattr_next_token_id = labels[endofattr_next_token_index] - # NEXT_WORD - # predict 
NEXT_WORD - # predict nothing - labels[labels == visual_token_id] = -100 - labels[labels == box_token_id] = -100 - labels[labels == endofattr_token_id] = -100 - # labels[endofattr_next_token_index] = -100 - labels[:, 0] = -100 - answer_token_id = tokenizer(" Answer").input_ids[0] - answer_token_loc = (input_ids == answer_token_id).nonzero() - for batch_idx, idx in answer_token_loc: - labels[batch_idx][:idx+2] = -100 - - with torch.cuda.amp.autocast(dtype=torch.float16) and torch.no_grad(): - outputs = model( - vision_x=vision_x, - lang_x=lang_x, - attention_mask=attention_mask, - labels=labels, - image_nums=image_nums, - image_start_index_list=image_start_index_list, - added_bbox_list=added_bbox, - add_box=added_bbox is not None, - ) - loss_total = outputs.loss.reshape(labels.shape[0], -1) - loss = loss_total.sum() / (loss_total != 0).sum() - losses.append(loss.item()) - pred_scores.append(np.exp(-np.array(losses)) / np.exp(-np.array(losses)).sum()) - gt.append(rel_type_to_id[gt_relation]) - gt = np.array(gt) - pred_scores = np.array(pred_scores) - pred = pred_scores.argmax(1) - - - print("total num:", len(gt)) - recalls = recall_score(y_true=gt, y_pred=pred, average=None, labels=[0,1,2,3,4,5]) - print("recalls:", recalls) - - with open(f"{dataset_name}_results_part{rank}_{id}.json", "w") as f: - f.write(json.dumps([gt.tolist(), pred.tolist()])) - if world_size > 1: - torch.distributed.barrier() - if rank == 0: - gt = [] - pred = [] - print(f"evaluate on rank {rank}. world size is {world_size}") - for rank_i in range(world_size): - [gt_part, pred_part] = json.load(open(f"{dataset_name}_results_part{rank_i}_{id}.json")) - os.remove(f"{dataset_name}_results_part{rank_i}_{id}.json") - gt.extend(gt_part) - pred.extend(pred_part) - print("total num:", len(gt)) - recalls = recall_score(y_true=gt, y_pred=pred, average=None, labels=[0,1,2,3,4,5]) - print("recalls:", recalls) - with open(os.path.join("eval_results", f"{dataset_name}_{model.expr_name}_{model.step_num}_{int(time.time())}"), "w") as f: - f.write(f"{gt}\n") - f.write(f"{pred}\n") - f.write(f"{recalls}\n") - score = 0.0 - if world_size > 1: - torch.distributed.barrier() - return score - - - -if __name__ == "__main__": - main() diff --git a/spaces/chendl/compositional_test/transformers/scripts/fsmt/convert-allenai-wmt19.sh b/spaces/chendl/compositional_test/transformers/scripts/fsmt/convert-allenai-wmt19.sh deleted file mode 100644 index ef8fa3d4186de1b25d841fc3f8d0a8c0b7c99995..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/scripts/fsmt/convert-allenai-wmt19.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# this script acquires data and converts it to fsmt model -# it covers: -# - allenai/wmt19-de-en-6-6-base -# - allenai/wmt19-de-en-6-6-big - -# this script needs to be run from the top level of the transformers repo -if [ ! 
-d "src/transformers" ]; then - echo "Error: This script needs to be run from the top of the transformers repo" - exit 1 -fi - -mkdir data - -# get data (run once) - -cd data -gdown 'https://drive.google.com/uc?id=1j6z9fYdlUyOYsh7KJoumRlr1yHczxR5T' -gdown 'https://drive.google.com/uc?id=1yT7ZjqfvUYOBXvMjeY8uGRHQFWoSo8Q5' -gdown 'https://drive.google.com/uc?id=15gAzHeRUCs-QV8vHeTReMPEh1j8excNE' -tar -xvzf wmt19.de-en.tar.gz -tar -xvzf wmt19_deen_base_dr0.1_1.tar.gz -tar -xvzf wmt19_deen_big_dr0.1_2.tar.gz -cp wmt19.de-en/data-bin/dict.*.txt wmt19_deen_base_dr0.1_1 -cp wmt19.de-en/data-bin/dict.*.txt wmt19_deen_big_dr0.1_2 -cd - - -# run conversions and uploads - -PYTHONPATH="src" python src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path data/wmt19_deen_base_dr0.1_1/checkpoint_last3_avg.pt --pytorch_dump_folder_path data/wmt19-de-en-6-6-base - -PYTHONPATH="src" python src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py --fsmt_checkpoint_path data/wmt19_deen_big_dr0.1_2/checkpoint_last3_avg.pt --pytorch_dump_folder_path data/wmt19-de-en-6-6-big - - -# upload -cd data -transformers-cli upload -y wmt19-de-en-6-6-base -transformers-cli upload -y wmt19-de-en-6-6-big -cd - - - -# if updating just small files and not the large models, here is a script to generate the right commands: -perl -le 'for $f (@ARGV) { print qq[transformers-cli upload -y $_/$f --filename $_/$f] for ("wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big")}' vocab-src.json vocab-tgt.json tokenizer_config.json config.json -# add/remove files as needed - diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py b/spaces/chendl/compositional_test/transformers/src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py deleted file mode 100644 index a032ee93b03db82216f29e2ce20f9af833980851..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" Convert slow tokenizers checkpoints in fast (serialization format of the `tokenizers` library)""" - -import argparse -import os - -import transformers - -from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS -from .utils import logging - - -logging.set_verbosity_info() - -logger = logging.get_logger(__name__) - - -TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} - - -def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download): - if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: - raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.") - - if tokenizer_name is None: - tokenizer_names = TOKENIZER_CLASSES - else: - tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")} - - logger.info(f"Loading tokenizer classes: {tokenizer_names}") - - for tokenizer_name in tokenizer_names: - tokenizer_class = TOKENIZER_CLASSES[tokenizer_name] - - add_prefix = True - if checkpoint_name is None: - checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys()) - else: - checkpoint_names = [checkpoint_name] - - logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}") - - for checkpoint in checkpoint_names: - logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}") - - # Load tokenizer - tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download) - - # Save fast tokenizer - logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}") - - # For organization names we create sub-directories - if "/" in checkpoint: - checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/") - dump_path_full = os.path.join(dump_path, checkpoint_directory) - elif add_prefix: - checkpoint_prefix_name = checkpoint - dump_path_full = dump_path - else: - checkpoint_prefix_name = None - dump_path_full = dump_path - - logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}") - - if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]: - file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint] - next_char = file_path.split(checkpoint)[-1][0] - if next_char == "/": - dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name) - checkpoint_prefix_name = None - - logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}") - - file_names = tokenizer.save_pretrained( - dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name - ) - logger.info(f"=> File names {file_names}") - - for file_name in file_names: - if not file_name.endswith("tokenizer.json"): - os.remove(file_name) - logger.info(f"=> removing {file_name}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - # Required parameters - parser.add_argument( - "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." - ) - parser.add_argument( - "--tokenizer_name", - default=None, - type=str, - help=( - f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will " - "download and convert all the checkpoints from AWS." - ), - ) - parser.add_argument( - "--checkpoint_name", - default=None, - type=str, - help="Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.", - ) - parser.add_argument( - "--force_download", - action="store_true", - help="Re-download checkpoints.", - ) - args = parser.parse_args() - - convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/streams/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/streams/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/db/migrations/00001-migration-1.sqlite.sql b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/db/migrations/00001-migration-1.sqlite.sql deleted file mode 100644 index a214bae8d5b0d6482fedd18265d4dfc756d47485..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chromadb/test/db/migrations/00001-migration-1.sqlite.sql +++ /dev/null @@ -1,3 +0,0 @@ -CREATE TABLE table1 ( - name TEXT PRIMARY KEY -); diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-b04fff44.js b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-b04fff44.js deleted file mode 100644 index 34bc3fddc9cedfdcec2699cdef32381f544ad61e..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-b04fff44.js +++ /dev/null @@ -1,2 +0,0 @@ -import{C as e}from"./Column-824a6363.js";import"./index-f877dfd5.js";/* empty css */const m=["static"];export{e as Component,m as modes}; -//# sourceMappingURL=index-b04fff44.js.map diff --git a/spaces/cihyFjudo/fairness-paper-search/DVD Slim Free 1.8.0.10 Portable projectmyskills Create and Print Custom Covers for Your Discs.md b/spaces/cihyFjudo/fairness-paper-search/DVD Slim Free 1.8.0.10 Portable projectmyskills Create and Print Custom Covers for Your Discs.md deleted file mode 100644 index 7af5af2ce75c5fc875626e6afcc0dcf89909a173..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/DVD Slim Free 1.8.0.10 Portable projectmyskills Create and Print Custom Covers for Your Discs.md +++ /dev/null @@ -1,6 +0,0 @@ -

      DVD Slim Free 1.8.0.10 Portable {projectmyskills}


      diff --git a/spaces/cihyFjudo/fairness-paper-search/Space cad 5 How to design build and fly model rockets with ease.md b/spaces/cihyFjudo/fairness-paper-search/Space cad 5 How to design build and fly model rockets with ease.md deleted file mode 100644 index d0af7e97b1fb8e3e54746058580dd62e54d7329f..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Space cad 5 How to design build and fly model rockets with ease.md +++ /dev/null @@ -1,23 +0,0 @@ - -

      This research aimed to evaluate the effect of cement space on the marginal discrepancy and retention of computer-aided design/computer-aided manufacturing (CAD/CAM) crowns. A total of 30 premolar Frasaco teeth were machined to receive crowns with cement spaces of 70, 90, and 110 μm. Marginal discrepancy was measured before and after cementation, and a pull-off test was conducted using a universal testing machine (UTM). Data were analyzed using a two-way mixed ANOVA with post-hoc Bonferroni test and a Kruskal-Wallis test. The crowns with a cement space of 70 μm showed a significantly higher absolute marginal discrepancy than those with 90 and 110 μm, while no significant effect on crown retention was found. Within the limitations of this study, widening the cement space to 90 or 110 μm may improve the marginal adaptation of a CAD/CAM crown, whereas adjusting the cement space from 70 to 110 μm did not significantly affect crown retention.
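
      As a rough illustration of the analysis named above, here is a minimal Python sketch of a Kruskal-Wallis comparison across the three cement-space groups. The numbers are made up for demonstration; they are not the study's data, and the study additionally used a two-way mixed ANOVA with post-hoc Bonferroni tests.

```python
import numpy as np
from scipy.stats import kruskal

rng = np.random.default_rng(0)
# Hypothetical absolute marginal discrepancy readings (um), one array per group.
group_70 = rng.normal(loc=110, scale=15, size=10)   # 70 um cement space
group_90 = rng.normal(loc=90, scale=15, size=10)    # 90 um cement space
group_110 = rng.normal(loc=85, scale=15, size=10)   # 110 um cement space

# Non-parametric comparison of the three independent groups.
stat, p = kruskal(group_70, group_90, group_110)
print(f"Kruskal-Wallis H = {stat:.2f}, p = {p:.4f}")
if p < 0.05:
    print("At least one cement-space group differs in marginal discrepancy.")
```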

      -

      -

      You will notice that the Viewport Scale in the charts below indicates a scale with the suffix XP. The suffix is AutoCAD nomenclature for changing the scale within a viewport. For instance, from paper space on a sheet you would enter model space within the viewport, type Z (for Zoom), and enter 1/96xp to display the drawing at 1/8" = 1'-0" in paper space. Said a different way by Autodesk, "You can change the view scale of the viewport by using the XP option of the ZOOM command when model space is accessed from within a layout viewport."
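
      To make the arithmetic behind the XP factor concrete, here is a small Python sketch (my own helper, not part of AutoCAD) that converts an architectural scale such as 1/8" = 1'-0" into the factor typed after the ZOOM command.

```python
from fractions import Fraction

def xp_factor(paper_inches: Fraction, model_feet: Fraction) -> Fraction:
    """Return the n in 'ZOOM n XP' for the scale paper_inches = model_feet."""
    model_inches = model_feet * 12      # convert the model side of the scale to inches
    return paper_inches / model_inches  # paper units shown per model unit

f = xp_factor(Fraction(1, 8), Fraction(1))  # the scale 1/8" = 1'-0"
print(f"ZOOM {f}XP")                        # -> ZOOM 1/96XP
```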

      -

      If you are already using space objects, you are aware that regular objects can be used as boundary objects for spaces. Now you can also use a "space separator" tool that lets you separate spaces when no real object is there to divide them, for example when you have different functional areas within the same room. The command can only be run as a tool, and you will find it in the Design tool palette.

      -

      At the "Specify first point:" and "Specify next point:" prompts, you can pick a series of points to define the separator line. The space separator uses a polyline to generate the separating line so that you can create a multi-segment separator line and split the room to the shape you want. Refer to the figures below.

      -

      -

      This is one of the most welcome features regarding spaces. When a space is created or generated, it is immediately moved to the back of the drawing order, so building elements and annotation do not get obscured by the space. You will save a lot of time by not having to select spaces and set their draw order to "back" yourself.

      -

      The depiction of Earth from space was based on satellite images provided to the Bank by Natural Resources Canada. The Great Lakes, Hudson Bay and the Gulf of St Lawrence make the view of Canada easily recognizable.

      -

      Batteries powering satellites or spacecraft must be rugged enough to withstand the severe vibrations of launch. Once the craft is deployed, these batteries must operate in extreme conditions of heat, cold, and solar radiation, and they need to work in a vacuum without leaking or exploding. There are many types of batteries: carbon-zinc, lead-acid, nickel-cadmium, nickel-hydrogen, silver-zinc, alkaline, and lithium-ion, to name a few.

      -

      Most batteries currently used in space flight are nickel-cadmium. Also called Ni-Cad, these batteries are charged by solar cells that convert the Sun's energy to electricity, but they gradually wear out and lose their ability to hold a charge. Space Technology 5's small-sats will instead use lithium-ion, or Li-ion, batteries, which store energy chemically. Each cell of a Li-ion battery is equipped with a control circuit to limit the voltage peaks during charge and to prevent the voltage from dropping too low on discharge. This control circuit also limits the maximum charge and discharge current.
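
      The sketch below is a toy Python model of that per-cell control circuit, with assumed (typical consumer-grade, not NASA-specified) cutoff values, just to show the clamping behaviour described above.

```python
from dataclasses import dataclass

@dataclass
class LiIonCellProtector:
    v_max: float = 4.2    # volts, charge cutoff (assumed typical value)
    v_min: float = 3.0    # volts, discharge cutoff (assumed typical value)
    i_limit: float = 2.0  # amps, maximum charge/discharge current

    def allow_charge(self, voltage: float, current: float) -> bool:
        # Block charging once the cell reaches its peak voltage or current limit.
        return voltage < self.v_max and abs(current) <= self.i_limit

    def allow_discharge(self, voltage: float, current: float) -> bool:
        # Block discharge before the cell voltage drops too low.
        return voltage > self.v_min and abs(current) <= self.i_limit

p = LiIonCellProtector()
print(p.allow_charge(4.25, 1.0))    # False: voltage peak clamped
print(p.allow_discharge(2.9, 0.5))  # False: voltage dropped too low
```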

      -

      At the moment, many fashion entrepreneurs are most likely using Adobe Illustrator to take their designs into the digital space. Still, the good news is that there is more than one software solution available to help you start your clothing line. When choosing the one that is best for you, base your decision on what will work for your business in the long run: think about whether you want a one-time lifetime license or a subscription-based model, and bear in mind that combining more than one solution to create digital fashion sketches and digital samples and to develop prototypes is also viable.

      -

      However, a fleet of Republic ships, led by Jedi Knight Anakin Skywalker, came to Devaron, and in the ensuing battle most of the frigates were destroyed, leaving only the vessel with Bane and Ropal on board. Skywalker had his fleet destroy the hyperdrive of the Separatist flagship, preventing it from escaping into hyperspace. Bane then attempted to force Ropal to open the holocron by torturing him, but Ropal died under the unbearable torture before he could do so. To the exasperated Gunray's relief, Bane then came up with a plan to force Skywalker and his apprentice to open the holocron themselves, and arranged an ambush on the gun deck for Skywalker, Tano, Rex, and their troops.[26]

      -

      After obtaining the holocron from the Jedi Temple, Darth Sidious tasked Cad Bane with kidnapping four Force-sensitive children for a secret project on Mustafar. After the bounty hunter spirited away the Rodian child Wee Dunn from Rodia and the Nautolan child Zinn Toa from Glee Anselm, Skywalker and Tano arrested him as he attempted to take Roo-Roo Page from Jan-gwa city on Naboo. The two Jedi, along with Masters Mace Windu and Obi-Wan Kenobi, attempted to interrogate the captive Bane aboard the Resolute with little success, concluding that he probably dreaded his employer more than he feared them. Apparently yielding to the torturous mind-probing, Bane relented and took Windu, Kenobi, and Cody to the space station where he claimed both the children and the holocron were held; only the holocron was present. However, the bounty hunter was able to have the Jedi trigger a trap and made his escape.[27]

      -

      Initial development of the Star Wars: The Clone Wars television series focused on presenting the conflict between the Galactic Republic and the Separatists, leaving no room for third-party villains, but as the storyline progressed, the need for a bounty hunter character emerged. Supervising director Dave Filoni and writer Henry Gilroy initially considered reusing the Gen'Dai bounty hunter Durge from the 2003 Star Wars: Clone Wars cartoon (now part of Star Wars Legends). This idea was eventually scrapped, and Bane was reimagined as an in-universe equivalent of classic Western characters, based on Lee Van Cleef's "Angel Eyes" from The Good, the Bad and the Ugly. During this research, Filoni also came across early concept art, from drafts of the original Star Wars trilogy, of a gun-toting spacer wearing a hat.[44]

      -

      The tooth was anaesthetized with 2% lidocaine with 1:200,000 adrenaline, and the extraction was performed. The patient was prescribed antibiotics and analgesics, and was recalled after one day for evaluation of postoperative pain and swelling. Seven days post-extraction [Table/Fig-2], impressions of the upper and lower arches were made with elastomeric impression material and sent to the lab for fabrication of the space maintainer. BruxZir zirconia was the material of choice: BruxZir is a solid monolithic zirconia that uses CAD/CAM technology for the design and milling of the restoration. A gingival shade was added to the appliance to make it more aesthetically pleasing [Table/Fig-3].

      -

      After six months, the patient was completely asymptomatic. No signs of gingival inflammation or tissue irritation were seen at the site of the space maintainer, and the patient had no difficulty in mastication [Table/Fig-6].

      -

      Premature loss of primary molars also causes a reduction in arch length. According to Northway WM, when the maxillary primary first molar is lost prematurely, the first premolar erupts in a more mesial direction than normal because of the mesial incline of the primary second molar, and consumes the space of the permanent canine, which becomes blocked out, a particularly deleterious effect [2]. Richardson stated that the maximum amount of space loss occurs in the first six months post-extraction [3]. Hence, the space maintainer was recommended immediately after extraction of the tooth.

      -

      Various space maintainers, such as band and loop, crown and loop, and lingual arch appliances, are routinely used in dentistry. Disadvantages such as corrosion and appliance fracture have led to the development of more aesthetic, metal-free space maintainers, which include the fibre-reinforced composite resin (FRCR) space maintainer and the CAD/CAM ceramic band and loop space maintainer. The comparison between the aesthetic space maintainers is depicted in [Table/Fig-7].

      -

      BruxZir zirconia has a high flexural strength (up to 1,465 MPa) and exhibits three to five times the fracture toughness of typical zirconia, giving it high resistance to the heavy masticatory forces in the mouth. It also has excellent resistance to thermal shock thanks to its low thermal expansion, which means the restoration remains stable when the patient takes hot and cold fluids. BruxZir is available in all the VITA classical and gingival shades, which fulfils the aesthetic demands of the patient [1]. Hence, BruxZir was considered the ideal material for fabrication of the space maintainer.

      -

      The design of the space maintainer was such that it was supported by both the canine and the primary second molar, to allow better retention of the appliance and to prevent tipping of the teeth. The masticatory forces applied in the region of the extracted tooth are distributed equally between the canine and the second primary molar, preventing the appliance from being dislodged. The gingival loop was kept close to the gingiva to prevent food lodgment in the area.

      \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/[P3D V3 V4] FlightBeam KSFO HD PC A New and Improved Version of the Popular KSFO Scenery.md b/spaces/cihyFjudo/fairness-paper-search/[P3D V3 V4] FlightBeam KSFO HD PC A New and Improved Version of the Popular KSFO Scenery.md deleted file mode 100644 index 2c359bc42a880854298f2af913f1277d7a77924a..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/[P3D V3 V4] FlightBeam KSFO HD PC A New and Improved Version of the Popular KSFO Scenery.md +++ /dev/null @@ -1,6 +0,0 @@ -

      [P3D V3 V4] FlightBeam KSFO HD PC



      diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/altair/vegalite/v5/display.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/altair/vegalite/v5/display.py deleted file mode 100644 index ba69e02e076b0828a9b2032eb47de8c1fb1492d8..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/altair/vegalite/v5/display.py +++ /dev/null @@ -1,119 +0,0 @@ -import os - -from ...utils.mimebundle import spec_to_mimebundle -from ..display import Displayable -from ..display import default_renderer_base -from ..display import json_renderer_base -from ..display import RendererRegistry -from ..display import HTMLRenderer - -from .schema import SCHEMA_VERSION - -VEGALITE_VERSION = SCHEMA_VERSION.lstrip("v") -VEGA_VERSION = "5" -VEGAEMBED_VERSION = "6" - - -# ============================================================================== -# VegaLite v5 renderer logic -# ============================================================================== - - -# The MIME type for Vega-Lite 5.x releases. -VEGALITE_MIME_TYPE = "application/vnd.vegalite.v5+json" # type: str - -# The entry point group that can be used by other packages to declare other -# renderers that will be auto-detected. Explicit registration is also -# allowed by the PluginRegistery API. -ENTRY_POINT_GROUP = "altair.vegalite.v5.renderer" # type: str - -# The display message when rendering fails -DEFAULT_DISPLAY = """\ - - -If you see this message, it means the renderer has not been properly enabled -for the frontend that you are using. For more information, see -https://altair-viz.github.io/user_guide/display_frontends.html#troubleshooting -""" - -renderers = RendererRegistry(entry_point_group=ENTRY_POINT_GROUP) - -here = os.path.dirname(os.path.realpath(__file__)) - - -def mimetype_renderer(spec, **metadata): - return default_renderer_base(spec, VEGALITE_MIME_TYPE, DEFAULT_DISPLAY, **metadata) - - -def json_renderer(spec, **metadata): - return json_renderer_base(spec, DEFAULT_DISPLAY, **metadata) - - -def png_renderer(spec, **metadata): - return spec_to_mimebundle( - spec, - format="png", - mode="vega-lite", - vega_version=VEGA_VERSION, - vegaembed_version=VEGAEMBED_VERSION, - vegalite_version=VEGALITE_VERSION, - **metadata, - ) - - -def svg_renderer(spec, **metadata): - return spec_to_mimebundle( - spec, - format="svg", - mode="vega-lite", - vega_version=VEGA_VERSION, - vegaembed_version=VEGAEMBED_VERSION, - vegalite_version=VEGALITE_VERSION, - **metadata, - ) - - -html_renderer = HTMLRenderer( - mode="vega-lite", - template="universal", - vega_version=VEGA_VERSION, - vegaembed_version=VEGAEMBED_VERSION, - vegalite_version=VEGALITE_VERSION, -) - -renderers.register("default", html_renderer) -renderers.register("html", html_renderer) -renderers.register("colab", html_renderer) -renderers.register("kaggle", html_renderer) -renderers.register("zeppelin", html_renderer) -renderers.register("mimetype", mimetype_renderer) -renderers.register("jupyterlab", mimetype_renderer) -renderers.register("nteract", mimetype_renderer) -renderers.register("json", json_renderer) -renderers.register("png", png_renderer) -renderers.register("svg", svg_renderer) -renderers.enable("default") - - -class VegaLite(Displayable): - """An IPython/Jupyter display class for rendering VegaLite 5.""" - - renderers = renderers - schema_path = (__name__, "schema/vega-lite-schema.json") - - -def vegalite(spec, validate=True): - """Render and optionally validate a 
VegaLite 5 spec. - - This will use the currently enabled renderer to render the spec. - - Parameters - ========== - spec: dict - A fully compliant VegaLite 5 spec, with the data portion fully processed. - validate: bool - Should the spec be validated against the VegaLite 5 schema? - """ - from IPython.display import display - - display(VegaLite(spec, validate=validate)) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacsbrdata.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacsbrdata.h deleted file mode 100644 index 7bb45b229e29ef1c14ca61074ae82c973e2372c3..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacsbrdata.h +++ /dev/null @@ -1,703 +0,0 @@ -/* - * AAC Spectral Band Replication decoding data - * Copyright (c) 2008-2009 Robert Swain ( rob opendot cl ) - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * AAC Spectral Band Replication decoding data - * @author Robert Swain ( rob opendot cl ) - */ - -#ifndef AVCODEC_AACSBRDATA_H -#define AVCODEC_AACSBRDATA_H - -#include -#include "libavutil/attributes_internal.h" -#include "libavutil/mem_internal.h" -#include "aac_defines.h" - -///< Huffman tables for SBR - -static const uint8_t t_huffman_env_1_5dB_bits[121] = { - 18, 18, 18, 18, 18, 18, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 19, 17, 18, 16, 17, 18, 17, - 16, 16, 16, 16, 15, 14, 14, 13, - 13, 12, 11, 10, 9, 8, 7, 6, - 5, 4, 3, 2, 2, 3, 4, 5, - 6, 7, 8, 9, 10, 12, 13, 14, - 14, 15, 16, 17, 16, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, -}; - -static const uint32_t t_huffman_env_1_5dB_codes[121] = { - 0x3ffd6, 0x3ffd7, 0x3ffd8, 0x3ffd9, 0x3ffda, 0x3ffdb, 0x7ffb8, 0x7ffb9, - 0x7ffba, 0x7ffbb, 0x7ffbc, 0x7ffbd, 0x7ffbe, 0x7ffbf, 0x7ffc0, 0x7ffc1, - 0x7ffc2, 0x7ffc3, 0x7ffc4, 0x7ffc5, 0x7ffc6, 0x7ffc7, 0x7ffc8, 0x7ffc9, - 0x7ffca, 0x7ffcb, 0x7ffcc, 0x7ffcd, 0x7ffce, 0x7ffcf, 0x7ffd0, 0x7ffd1, - 0x7ffd2, 0x7ffd3, 0x1ffe6, 0x3ffd4, 0x0fff0, 0x1ffe9, 0x3ffd5, 0x1ffe7, - 0x0fff1, 0x0ffec, 0x0ffed, 0x0ffee, 0x07ff4, 0x03ff9, 0x03ff7, 0x01ffa, - 0x01ff9, 0x00ffb, 0x007fc, 0x003fc, 0x001fd, 0x000fd, 0x0007d, 0x0003d, - 0x0001d, 0x0000d, 0x00005, 0x00001, 0x00000, 0x00004, 0x0000c, 0x0001c, - 0x0003c, 0x0007c, 0x000fc, 0x001fc, 0x003fd, 0x00ffa, 0x01ff8, 0x03ff6, - 0x03ff8, 0x07ff5, 0x0ffef, 0x1ffe8, 0x0fff2, 0x7ffd4, 0x7ffd5, 0x7ffd6, - 0x7ffd7, 0x7ffd8, 0x7ffd9, 0x7ffda, 0x7ffdb, 0x7ffdc, 0x7ffdd, 0x7ffde, - 0x7ffdf, 0x7ffe0, 0x7ffe1, 0x7ffe2, 0x7ffe3, 0x7ffe4, 0x7ffe5, 0x7ffe6, - 0x7ffe7, 0x7ffe8, 0x7ffe9, 0x7ffea, 0x7ffeb, 0x7ffec, 0x7ffed, 0x7ffee, - 
0x7ffef, 0x7fff0, 0x7fff1, 0x7fff2, 0x7fff3, 0x7fff4, 0x7fff5, 0x7fff6, - 0x7fff7, 0x7fff8, 0x7fff9, 0x7fffa, 0x7fffb, 0x7fffc, 0x7fffd, 0x7fffe, - 0x7ffff, -}; - -static const uint8_t f_huffman_env_1_5dB_bits[121] = { - 19, 19, 20, 20, 20, 20, 20, 20, - 20, 19, 20, 20, 20, 20, 19, 20, - 19, 19, 20, 18, 20, 20, 20, 19, - 20, 20, 20, 19, 20, 19, 18, 19, - 18, 18, 17, 18, 17, 17, 17, 16, - 16, 16, 15, 15, 14, 13, 13, 12, - 12, 11, 10, 9, 9, 8, 7, 6, - 5, 4, 3, 2, 2, 3, 4, 5, - 6, 8, 8, 9, 10, 11, 11, 11, - 12, 12, 13, 13, 14, 14, 16, 16, - 17, 17, 18, 18, 18, 18, 18, 18, - 18, 20, 19, 20, 20, 20, 20, 20, - 20, 19, 20, 20, 20, 20, 19, 20, - 18, 20, 20, 19, 19, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 20, - 20, -}; - -static const uint32_t f_huffman_env_1_5dB_codes[121] = { - 0x7ffe7, 0x7ffe8, 0xfffd2, 0xfffd3, 0xfffd4, 0xfffd5, 0xfffd6, 0xfffd7, - 0xfffd8, 0x7ffda, 0xfffd9, 0xfffda, 0xfffdb, 0xfffdc, 0x7ffdb, 0xfffdd, - 0x7ffdc, 0x7ffdd, 0xfffde, 0x3ffe4, 0xfffdf, 0xfffe0, 0xfffe1, 0x7ffde, - 0xfffe2, 0xfffe3, 0xfffe4, 0x7ffdf, 0xfffe5, 0x7ffe0, 0x3ffe8, 0x7ffe1, - 0x3ffe0, 0x3ffe9, 0x1ffef, 0x3ffe5, 0x1ffec, 0x1ffed, 0x1ffee, 0x0fff4, - 0x0fff3, 0x0fff0, 0x07ff7, 0x07ff6, 0x03ffa, 0x01ffa, 0x01ff9, 0x00ffa, - 0x00ff8, 0x007f9, 0x003fb, 0x001fc, 0x001fa, 0x000fb, 0x0007c, 0x0003c, - 0x0001c, 0x0000c, 0x00005, 0x00001, 0x00000, 0x00004, 0x0000d, 0x0001d, - 0x0003d, 0x000fa, 0x000fc, 0x001fb, 0x003fa, 0x007f8, 0x007fa, 0x007fb, - 0x00ff9, 0x00ffb, 0x01ff8, 0x01ffb, 0x03ff8, 0x03ff9, 0x0fff1, 0x0fff2, - 0x1ffea, 0x1ffeb, 0x3ffe1, 0x3ffe2, 0x3ffea, 0x3ffe3, 0x3ffe6, 0x3ffe7, - 0x3ffeb, 0xfffe6, 0x7ffe2, 0xfffe7, 0xfffe8, 0xfffe9, 0xfffea, 0xfffeb, - 0xfffec, 0x7ffe3, 0xfffed, 0xfffee, 0xfffef, 0xffff0, 0x7ffe4, 0xffff1, - 0x3ffec, 0xffff2, 0xffff3, 0x7ffe5, 0x7ffe6, 0xffff4, 0xffff5, 0xffff6, - 0xffff7, 0xffff8, 0xffff9, 0xffffa, 0xffffb, 0xffffc, 0xffffd, 0xffffe, - 0xfffff, -}; - -static const uint8_t t_huffman_env_bal_1_5dB_bits[49] = { - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 12, 11, 9, 7, 5, 3, - 1, 2, 4, 6, 8, 11, 12, 15, - 16, 16, 16, 16, 16, 16, 16, 17, - 17, 17, 17, 17, 17, 17, 17, 17, - 17, -}; - -static const uint32_t t_huffman_env_bal_1_5dB_codes[49] = { - 0x0ffe4, 0x0ffe5, 0x0ffe6, 0x0ffe7, 0x0ffe8, 0x0ffe9, 0x0ffea, 0x0ffeb, - 0x0ffec, 0x0ffed, 0x0ffee, 0x0ffef, 0x0fff0, 0x0fff1, 0x0fff2, 0x0fff3, - 0x0fff4, 0x0ffe2, 0x00ffc, 0x007fc, 0x001fe, 0x0007e, 0x0001e, 0x00006, - 0x00000, 0x00002, 0x0000e, 0x0003e, 0x000fe, 0x007fd, 0x00ffd, 0x07ff0, - 0x0ffe3, 0x0fff5, 0x0fff6, 0x0fff7, 0x0fff8, 0x0fff9, 0x0fffa, 0x1fff6, - 0x1fff7, 0x1fff8, 0x1fff9, 0x1fffa, 0x1fffb, 0x1fffc, 0x1fffd, 0x1fffe, - 0x1ffff, -}; - -static const uint8_t f_huffman_env_bal_1_5dB_bits[49] = { - 18, 18, 18, 18, 18, 18, 18, 18, - 18, 18, 18, 18, 18, 18, 18, 16, - 17, 14, 11, 11, 8, 7, 4, 2, - 1, 3, 5, 6, 9, 11, 12, 15, - 16, 18, 18, 18, 18, 18, 18, 18, - 18, 18, 18, 18, 18, 18, 18, 19, - 19, -}; - -static const uint32_t f_huffman_env_bal_1_5dB_codes[49] = { - 0x3ffe2, 0x3ffe3, 0x3ffe4, 0x3ffe5, 0x3ffe6, 0x3ffe7, 0x3ffe8, 0x3ffe9, - 0x3ffea, 0x3ffeb, 0x3ffec, 0x3ffed, 0x3ffee, 0x3ffef, 0x3fff0, 0x0fff7, - 0x1fff0, 0x03ffc, 0x007fe, 0x007fc, 0x000fe, 0x0007e, 0x0000e, 0x00002, - 0x00000, 0x00006, 0x0001e, 0x0003e, 0x001fe, 0x007fd, 0x00ffe, 0x07ffa, - 0x0fff6, 0x3fff1, 0x3fff2, 0x3fff3, 0x3fff4, 0x3fff5, 0x3fff6, 0x3fff7, - 0x3fff8, 0x3fff9, 0x3fffa, 0x3fffb, 0x3fffc, 0x3fffd, 0x3fffe, 0x7fffe, - 0x7ffff, -}; - -static const uint8_t t_huffman_env_3_0dB_bits[63] = { - 18, 18, 19, 
19, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 17, 16, 16, 16, 14, 14, 14, - 13, 12, 11, 8, 6, 4, 2, 1, - 3, 5, 7, 9, 11, 13, 14, 14, - 15, 16, 17, 18, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, 19, - 19, 19, 19, 19, 19, 19, 19, -}; - -static const uint32_t t_huffman_env_3_0dB_codes[63] = { - 0x3ffed, 0x3ffee, 0x7ffde, 0x7ffdf, 0x7ffe0, 0x7ffe1, 0x7ffe2, 0x7ffe3, - 0x7ffe4, 0x7ffe5, 0x7ffe6, 0x7ffe7, 0x7ffe8, 0x7ffe9, 0x7ffea, 0x7ffeb, - 0x7ffec, 0x1fff4, 0x0fff7, 0x0fff9, 0x0fff8, 0x03ffb, 0x03ffa, 0x03ff8, - 0x01ffa, 0x00ffc, 0x007fc, 0x000fe, 0x0003e, 0x0000e, 0x00002, 0x00000, - 0x00006, 0x0001e, 0x0007e, 0x001fe, 0x007fd, 0x01ffb, 0x03ff9, 0x03ffc, - 0x07ffa, 0x0fff6, 0x1fff5, 0x3ffec, 0x7ffed, 0x7ffee, 0x7ffef, 0x7fff0, - 0x7fff1, 0x7fff2, 0x7fff3, 0x7fff4, 0x7fff5, 0x7fff6, 0x7fff7, 0x7fff8, - 0x7fff9, 0x7fffa, 0x7fffb, 0x7fffc, 0x7fffd, 0x7fffe, 0x7ffff, -}; - -static const uint8_t f_huffman_env_3_0dB_bits[63] = { - 20, 20, 20, 20, 20, 20, 20, 18, - 19, 19, 19, 19, 18, 18, 20, 19, - 17, 18, 17, 16, 16, 15, 14, 12, - 11, 10, 9, 8, 6, 4, 2, 1, - 3, 5, 8, 9, 10, 11, 12, 13, - 14, 15, 15, 16, 16, 17, 17, 18, - 18, 18, 20, 19, 19, 19, 20, 19, - 19, 20, 20, 20, 20, 20, 20, -}; - -static const uint32_t f_huffman_env_3_0dB_codes[63] = { - 0xffff0, 0xffff1, 0xffff2, 0xffff3, 0xffff4, 0xffff5, 0xffff6, 0x3fff3, - 0x7fff5, 0x7ffee, 0x7ffef, 0x7fff6, 0x3fff4, 0x3fff2, 0xffff7, 0x7fff0, - 0x1fff5, 0x3fff0, 0x1fff4, 0x0fff7, 0x0fff6, 0x07ff8, 0x03ffb, 0x00ffd, - 0x007fd, 0x003fd, 0x001fd, 0x000fd, 0x0003e, 0x0000e, 0x00002, 0x00000, - 0x00006, 0x0001e, 0x000fc, 0x001fc, 0x003fc, 0x007fc, 0x00ffc, 0x01ffc, - 0x03ffa, 0x07ff9, 0x07ffa, 0x0fff8, 0x0fff9, 0x1fff6, 0x1fff7, 0x3fff5, - 0x3fff6, 0x3fff1, 0xffff8, 0x7fff1, 0x7fff2, 0x7fff3, 0xffff9, 0x7fff7, - 0x7fff4, 0xffffa, 0xffffb, 0xffffc, 0xffffd, 0xffffe, 0xfffff, -}; - -static const uint8_t t_huffman_env_bal_3_0dB_bits[25] = { - 13, 13, 13, 13, 13, 13, 13, 12, - 8, 7, 4, 3, 1, 2, 5, 6, - 9, 13, 13, 13, 13, 13, 13, 14, - 14, -}; - -static const uint16_t t_huffman_env_bal_3_0dB_codes[25] = { - 0x1ff2, 0x1ff3, 0x1ff4, 0x1ff5, 0x1ff6, 0x1ff7, 0x1ff8, 0x0ff8, - 0x00fe, 0x007e, 0x000e, 0x0006, 0x0000, 0x0002, 0x001e, 0x003e, - 0x01fe, 0x1ff9, 0x1ffa, 0x1ffb, 0x1ffc, 0x1ffd, 0x1ffe, 0x3ffe, - 0x3fff, -}; - -static const uint8_t f_huffman_env_bal_3_0dB_bits[25] = { - 13, 13, 13, 13, 13, 14, 14, 11, - 8, 7, 4, 2, 1, 3, 5, 6, - 9, 12, 13, 14, 14, 14, 14, 14, - 14, -}; - -static const uint16_t f_huffman_env_bal_3_0dB_codes[25] = { - 0x1ff7, 0x1ff8, 0x1ff9, 0x1ffa, 0x1ffb, 0x3ff8, 0x3ff9, 0x07fc, - 0x00fe, 0x007e, 0x000e, 0x0002, 0x0000, 0x0006, 0x001e, 0x003e, - 0x01fe, 0x0ffa, 0x1ff6, 0x3ffa, 0x3ffb, 0x3ffc, 0x3ffd, 0x3ffe, - 0x3fff, -}; - -static const uint8_t t_huffman_noise_3_0dB_bits[63] = { - 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 11, 8, 6, 4, 3, 1, - 2, 5, 8, 10, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 14, 14, -}; - -static const uint16_t t_huffman_noise_3_0dB_codes[63] = { - 0x1fce, 0x1fcf, 0x1fd0, 0x1fd1, 0x1fd2, 0x1fd3, 0x1fd4, 0x1fd5, - 0x1fd6, 0x1fd7, 0x1fd8, 0x1fd9, 0x1fda, 0x1fdb, 0x1fdc, 0x1fdd, - 0x1fde, 0x1fdf, 0x1fe0, 0x1fe1, 0x1fe2, 0x1fe3, 0x1fe4, 0x1fe5, - 0x1fe6, 0x1fe7, 0x07f2, 0x00fd, 0x003e, 0x000e, 0x0006, 0x0000, - 0x0002, 0x001e, 0x00fc, 0x03f8, 0x1fcc, 0x1fe8, 0x1fe9, 0x1fea, - 0x1feb, 0x1fec, 0x1fcd, 0x1fed, 0x1fee, 0x1fef, 0x1ff0, 0x1ff1, - 0x1ff2, 0x1ff3, 0x1ff4, 0x1ff5, 
0x1ff6, 0x1ff7, 0x1ff8, 0x1ff9, - 0x1ffa, 0x1ffb, 0x1ffc, 0x1ffd, 0x1ffe, 0x3ffe, 0x3fff, -}; - -static const uint8_t t_huffman_noise_bal_3_0dB_bits[25] = { - 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 5, 2, 1, 3, 6, 8, - 8, 8, 8, 8, 8, 8, 8, 8, - 8, -}; - -static const uint8_t t_huffman_noise_bal_3_0dB_codes[25] = { - 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, - 0xf4, 0xf5, 0x1c, 0x02, 0x00, 0x06, 0x3a, 0xf6, - 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, - 0xff, -}; - -static const int8_t sbr_offset[6][16] = { - {-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7}, // fs_sbr = 16000 Hz - {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13}, // fs_sbr = 22050 Hz - {-5, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 16}, // fs_sbr = 24000 Hz - {-6, -4, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 16}, // fs_sbr = 32000 Hz - {-4, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 16, 20}, // 44100 Hz <= fs_sbr <= 64000 Hz - {-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 16, 20, 24}, // 64000 Hz < fs_sbr -}; - -/* First eight entries repeated at end to simplify SIMD implementations. */ -const attribute_visibility_hidden DECLARE_ALIGNED(16, INTFLOAT, AAC_RENAME(ff_sbr_noise_table))[][2] = { -{Q31(-0.99948153278296f), Q31(-0.59483417516607f)}, {Q31( 0.97113454393991f), Q31(-0.67528515225647f)}, -{Q31( 0.14130051758487f), Q31(-0.95090983575689f)}, {Q31(-0.47005496701697f), Q31(-0.37340549728647f)}, -{Q31( 0.80705063769351f), Q31( 0.29653668284408f)}, {Q31(-0.38981478896926f), Q31( 0.89572605717087f)}, -{Q31(-0.01053049862020f), Q31(-0.66959058036166f)}, {Q31(-0.91266367957293f), Q31(-0.11522938140034f)}, -{Q31( 0.54840422910309f), Q31( 0.75221367176302f)}, {Q31( 0.40009252867955f), Q31(-0.98929400334421f)}, -{Q31(-0.99867974711855f), Q31(-0.88147068645358f)}, {Q31(-0.95531076805040f), Q31( 0.90908757154593f)}, -{Q31(-0.45725933317144f), Q31(-0.56716323646760f)}, {Q31(-0.72929675029275f), Q31(-0.98008272727324f)}, -{Q31( 0.75622801399036f), Q31( 0.20950329995549f)}, {Q31( 0.07069442601050f), Q31(-0.78247898470706f)}, -{Q31( 0.74496252926055f), Q31(-0.91169004445807f)}, {Q31(-0.96440182703856f), Q31(-0.94739918296622f)}, -{Q31( 0.30424629369539f), Q31(-0.49438267012479f)}, {Q31( 0.66565033746925f), Q31( 0.64652935542491f)}, -{Q31( 0.91697008020594f), Q31( 0.17514097332009f)}, {Q31(-0.70774918760427f), Q31( 0.52548653416543f)}, -{Q31(-0.70051415345560f), Q31(-0.45340028808763f)}, {Q31(-0.99496513054797f), Q31(-0.90071908066973f)}, -{Q31( 0.98164490790123f), Q31(-0.77463155528697f)}, {Q31(-0.54671580548181f), Q31(-0.02570928536004f)}, -{Q31(-0.01689629065389f), Q31( 0.00287506445732f)}, {Q31(-0.86110349531986f), Q31( 0.42548583726477f)}, -{Q31(-0.98892980586032f), Q31(-0.87881132267556f)}, {Q31( 0.51756627678691f), Q31( 0.66926784710139f)}, -{Q31(-0.99635026409640f), Q31(-0.58107730574765f)}, {Q31(-0.99969370862163f), Q31( 0.98369989360250f)}, -{Q31( 0.55266258627194f), Q31( 0.59449057465591f)}, {Q31( 0.34581177741673f), Q31( 0.94879421061866f)}, -{Q31( 0.62664209577999f), Q31(-0.74402970906471f)}, {Q31(-0.77149701404973f), Q31(-0.33883658042801f)}, -{Q31(-0.91592244254432f), Q31( 0.03687901376713f)}, {Q31(-0.76285492357887f), Q31(-0.91371867919124f)}, -{Q31( 0.79788337195331f), Q31(-0.93180971199849f)}, {Q31( 0.54473080610200f), Q31(-0.11919206037186f)}, -{Q31(-0.85639281671058f), Q31( 0.42429854760451f)}, {Q31(-0.92882402971423f), Q31( 0.27871809078609f)}, -{Q31(-0.11708371046774f), Q31(-0.99800843444966f)}, {Q31( 0.21356749817493f), Q31(-0.90716295627033f)}, -{Q31(-0.76191692573909f), Q31( 
0.99768118356265f)}, {Q31( 0.98111043100884f), Q31(-0.95854459734407f)}, -{Q31(-0.85913269895572f), Q31( 0.95766566168880f)}, {Q31(-0.93307242253692f), Q31( 0.49431757696466f)}, -{Q31( 0.30485754879632f), Q31(-0.70540034357529f)}, {Q31( 0.85289650925190f), Q31( 0.46766131791044f)}, -{Q31( 0.91328082618125f), Q31(-0.99839597361769f)}, {Q31(-0.05890199924154f), Q31( 0.70741827819497f)}, -{Q31( 0.28398686150148f), Q31( 0.34633555702188f)}, {Q31( 0.95258164539612f), Q31(-0.54893416026939f)}, -{Q31(-0.78566324168507f), Q31(-0.75568541079691f)}, {Q31(-0.95789495447877f), Q31(-0.20423194696966f)}, -{Q31( 0.82411158711197f), Q31( 0.96654618432562f)}, {Q31(-0.65185446735885f), Q31(-0.88734990773289f)}, -{Q31(-0.93643603134666f), Q31( 0.99870790442385f)}, {Q31( 0.91427159529618f), Q31(-0.98290505544444f)}, -{Q31(-0.70395684036886f), Q31( 0.58796798221039f)}, {Q31( 0.00563771969365f), Q31( 0.61768196727244f)}, -{Q31( 0.89065051931895f), Q31( 0.52783352697585f)}, {Q31(-0.68683707712762f), Q31( 0.80806944710339f)}, -{Q31( 0.72165342518718f), Q31(-0.69259857349564f)}, {Q31(-0.62928247730667f), Q31( 0.13627037407335f)}, -{Q31( 0.29938434065514f), Q31(-0.46051329682246f)}, {Q31(-0.91781958879280f), Q31(-0.74012716684186f)}, -{Q31( 0.99298717043688f), Q31( 0.40816610075661f)}, {Q31( 0.82368298622748f), Q31(-0.74036047190173f)}, -{Q31(-0.98512833386833f), Q31(-0.99972330709594f)}, {Q31(-0.95915368242257f), Q31(-0.99237800466040f)}, -{Q31(-0.21411126572790f), Q31(-0.93424819052545f)}, {Q31(-0.68821476106884f), Q31(-0.26892306315457f)}, -{Q31( 0.91851997982317f), Q31( 0.09358228901785f)}, {Q31(-0.96062769559127f), Q31( 0.36099095133739f)}, -{Q31( 0.51646184922287f), Q31(-0.71373332873917f)}, {Q31( 0.61130721139669f), Q31( 0.46950141175917f)}, -{Q31( 0.47336129371299f), Q31(-0.27333178296162f)}, {Q31( 0.90998308703519f), Q31( 0.96715662938132f)}, -{Q31( 0.44844799194357f), Q31( 0.99211574628306f)}, {Q31( 0.66614891079092f), Q31( 0.96590176169121f)}, -{Q31( 0.74922239129237f), Q31(-0.89879858826087f)}, {Q31(-0.99571588506485f), Q31( 0.52785521494349f)}, -{Q31( 0.97401082477563f), Q31(-0.16855870075190f)}, {Q31( 0.72683747733879f), Q31(-0.48060774432251f)}, -{Q31( 0.95432193457128f), Q31( 0.68849603408441f)}, {Q31(-0.72962208425191f), Q31(-0.76608443420917f)}, -{Q31(-0.85359479233537f), Q31( 0.88738125901579f)}, {Q31(-0.81412430338535f), Q31(-0.97480768049637f)}, -{Q31(-0.87930772356786f), Q31( 0.74748307690436f)}, {Q31(-0.71573331064977f), Q31(-0.98570608178923f)}, -{Q31( 0.83524300028228f), Q31( 0.83702537075163f)}, {Q31(-0.48086065601423f), Q31(-0.98848504923531f)}, -{Q31( 0.97139128574778f), Q31( 0.80093621198236f)}, {Q31( 0.51992825347895f), Q31( 0.80247631400510f)}, -{Q31(-0.00848591195325f), Q31(-0.76670128000486f)}, {Q31(-0.70294374303036f), Q31( 0.55359910445577f)}, -{Q31(-0.95894428168140f), Q31(-0.43265504344783f)}, {Q31( 0.97079252950321f), Q31( 0.09325857238682f)}, -{Q31(-0.92404293670797f), Q31( 0.85507704027855f)}, {Q31(-0.69506469500450f), Q31( 0.98633412625459f)}, -{Q31( 0.26559203620024f), Q31( 0.73314307966524f)}, {Q31( 0.28038443336943f), Q31( 0.14537913654427f)}, -{Q31(-0.74138124825523f), Q31( 0.99310339807762f)}, {Q31(-0.01752795995444f), Q31(-0.82616635284178f)}, -{Q31(-0.55126773094930f), Q31(-0.98898543862153f)}, {Q31( 0.97960898850996f), Q31(-0.94021446752851f)}, -{Q31(-0.99196309146936f), Q31( 0.67019017358456f)}, {Q31(-0.67684928085260f), Q31( 0.12631491649378f)}, -{Q31( 0.09140039465500f), Q31(-0.20537731453108f)}, {Q31(-0.71658965751996f), Q31(-0.97788200391224f)}, -{Q31( 
0.81014640078925f), Q31( 0.53722648362443f)}, {Q31( 0.40616991671205f), Q31(-0.26469008598449f)}, -{Q31(-0.67680188682972f), Q31( 0.94502052337695f)}, {Q31( 0.86849774348749f), Q31(-0.18333598647899f)}, -{Q31(-0.99500381284851f), Q31(-0.02634122068550f)}, {Q31( 0.84329189340667f), Q31( 0.10406957462213f)}, -{Q31(-0.09215968531446f), Q31( 0.69540012101253f)}, {Q31( 0.99956173327206f), Q31(-0.12358542001404f)}, -{Q31(-0.79732779473535f), Q31(-0.91582524736159f)}, {Q31( 0.96349973642406f), Q31( 0.96640458041000f)}, -{Q31(-0.79942778496547f), Q31( 0.64323902822857f)}, {Q31(-0.11566039853896f), Q31( 0.28587846253726f)}, -{Q31(-0.39922954514662f), Q31( 0.94129601616966f)}, {Q31( 0.99089197565987f), Q31(-0.92062625581587f)}, -{Q31( 0.28631285179909f), Q31(-0.91035047143603f)}, {Q31(-0.83302725605608f), Q31(-0.67330410892084f)}, -{Q31( 0.95404443402072f), Q31( 0.49162765398743f)}, {Q31(-0.06449863579434f), Q31( 0.03250560813135f)}, -{Q31(-0.99575054486311f), Q31( 0.42389784469507f)}, {Q31(-0.65501142790847f), Q31( 0.82546114655624f)}, -{Q31(-0.81254441908887f), Q31(-0.51627234660629f)}, {Q31(-0.99646369485481f), Q31( 0.84490533520752f)}, -{Q31( 0.00287840603348f), Q31( 0.64768261158166f)}, {Q31( 0.70176989408455f), Q31(-0.20453028573322f)}, -{Q31( 0.96361882270190f), Q31( 0.40706967140989f)}, {Q31(-0.68883758192426f), Q31( 0.91338958840772f)}, -{Q31(-0.34875585502238f), Q31( 0.71472290693300f)}, {Q31( 0.91980081243087f), Q31( 0.66507455644919f)}, -{Q31(-0.99009048343881f), Q31( 0.85868021604848f)}, {Q31( 0.68865791458395f), Q31( 0.55660316809678f)}, -{Q31(-0.99484402129368f), Q31(-0.20052559254934f)}, {Q31( 0.94214511408023f), Q31(-0.99696425367461f)}, -{Q31(-0.67414626793544f), Q31( 0.49548221180078f)}, {Q31(-0.47339353684664f), Q31(-0.85904328834047f)}, -{Q31( 0.14323651387360f), Q31(-0.94145598222488f)}, {Q31(-0.29268293575672f), Q31( 0.05759224927952f)}, -{Q31( 0.43793861458754f), Q31(-0.78904969892724f)}, {Q31(-0.36345126374441f), Q31( 0.64874435357162f)}, -{Q31(-0.08750604656825f), Q31( 0.97686944362527f)}, {Q31(-0.96495267812511f), Q31(-0.53960305946511f)}, -{Q31( 0.55526940659947f), Q31( 0.78891523734774f)}, {Q31( 0.73538215752630f), Q31( 0.96452072373404f)}, -{Q31(-0.30889773919437f), Q31(-0.80664389776860f)}, {Q31( 0.03574995626194f), Q31(-0.97325616900959f)}, -{Q31( 0.98720684660488f), Q31( 0.48409133691962f)}, {Q31(-0.81689296271203f), Q31(-0.90827703628298f)}, -{Q31( 0.67866860118215f), Q31( 0.81284503870856f)}, {Q31(-0.15808569732583f), Q31( 0.85279555024382f)}, -{Q31( 0.80723395114371f), Q31(-0.24717418514605f)}, {Q31( 0.47788757329038f), Q31(-0.46333147839295f)}, -{Q31( 0.96367554763201f), Q31( 0.38486749303242f)}, {Q31(-0.99143875716818f), Q31(-0.24945277239809f)}, -{Q31( 0.83081876925833f), Q31(-0.94780851414763f)}, {Q31(-0.58753191905341f), Q31( 0.01290772389163f)}, -{Q31( 0.95538108220960f), Q31(-0.85557052096538f)}, {Q31(-0.96490920476211f), Q31(-0.64020970923102f)}, -{Q31(-0.97327101028521f), Q31( 0.12378128133110f)}, {Q31( 0.91400366022124f), Q31( 0.57972471346930f)}, -{Q31(-0.99925837363824f), Q31( 0.71084847864067f)}, {Q31(-0.86875903507313f), Q31(-0.20291699203564f)}, -{Q31(-0.26240034795124f), Q31(-0.68264554369108f)}, {Q31(-0.24664412953388f), Q31(-0.87642273115183f)}, -{Q31( 0.02416275806869f), Q31( 0.27192914288905f)}, {Q31( 0.82068619590515f), Q31(-0.85087787994476f)}, -{Q31( 0.88547373760759f), Q31(-0.89636802901469f)}, {Q31(-0.18173078152226f), Q31(-0.26152145156800f)}, -{Q31( 0.09355476558534f), Q31( 0.54845123045604f)}, {Q31(-0.54668414224090f), Q31( 
0.95980774020221f)}, -{Q31( 0.37050990604091f), Q31(-0.59910140383171f)}, {Q31(-0.70373594262891f), Q31( 0.91227665827081f)}, -{Q31(-0.34600785879594f), Q31(-0.99441426144200f)}, {Q31(-0.68774481731008f), Q31(-0.30238837956299f)}, -{Q31(-0.26843291251234f), Q31( 0.83115668004362f)}, {Q31( 0.49072334613242f), Q31(-0.45359708737775f)}, -{Q31( 0.38975993093975f), Q31( 0.95515358099121f)}, {Q31(-0.97757125224150f), Q31( 0.05305894580606f)}, -{Q31(-0.17325552859616f), Q31(-0.92770672250494f)}, {Q31( 0.99948035025744f), Q31( 0.58285545563426f)}, -{Q31(-0.64946246527458f), Q31( 0.68645507104960f)}, {Q31(-0.12016920576437f), Q31(-0.57147322153312f)}, -{Q31(-0.58947456517751f), Q31(-0.34847132454388f)}, {Q31(-0.41815140454465f), Q31( 0.16276422358861f)}, -{Q31( 0.99885650204884f), Q31( 0.11136095490444f)}, {Q31(-0.56649614128386f), Q31(-0.90494866361587f)}, -{Q31( 0.94138021032330f), Q31( 0.35281916733018f)}, {Q31(-0.75725076534641f), Q31( 0.53650549640587f)}, -{Q31( 0.20541973692630f), Q31(-0.94435144369918f)}, {Q31( 0.99980371023351f), Q31( 0.79835913565599f)}, -{Q31( 0.29078277605775f), Q31( 0.35393777921520f)}, {Q31(-0.62858772103030f), Q31( 0.38765693387102f)}, -{Q31( 0.43440904467688f), Q31(-0.98546330463232f)}, {Q31(-0.98298583762390f), Q31( 0.21021524625209f)}, -{Q31( 0.19513029146934f), Q31(-0.94239832251867f)}, {Q31(-0.95476662400101f), Q31( 0.98364554179143f)}, -{Q31( 0.93379635304810f), Q31(-0.70881994583682f)}, {Q31(-0.85235410573336f), Q31(-0.08342347966410f)}, -{Q31(-0.86425093011245f), Q31(-0.45795025029466f)}, {Q31( 0.38879779059045f), Q31( 0.97274429344593f)}, -{Q31( 0.92045124735495f), Q31(-0.62433652524220f)}, {Q31( 0.89162532251878f), Q31( 0.54950955570563f)}, -{Q31(-0.36834336949252f), Q31( 0.96458298020975f)}, {Q31( 0.93891760988045f), Q31(-0.89968353740388f)}, -{Q31( 0.99267657565094f), Q31(-0.03757034316958f)}, {Q31(-0.94063471614176f), Q31( 0.41332338538963f)}, -{Q31( 0.99740224117019f), Q31(-0.16830494996370f)}, {Q31(-0.35899413170555f), Q31(-0.46633226649613f)}, -{Q31( 0.05237237274947f), Q31(-0.25640361602661f)}, {Q31( 0.36703583957424f), Q31(-0.38653265641875f)}, -{Q31( 0.91653180367913f), Q31(-0.30587628726597f)}, {Q31( 0.69000803499316f), Q31( 0.90952171386132f)}, -{Q31(-0.38658751133527f), Q31( 0.99501571208985f)}, {Q31(-0.29250814029851f), Q31( 0.37444994344615f)}, -{Q31(-0.60182204677608f), Q31( 0.86779651036123f)}, {Q31(-0.97418588163217f), Q31( 0.96468523666475f)}, -{Q31( 0.88461574003963f), Q31( 0.57508405276414f)}, {Q31( 0.05198933055162f), Q31( 0.21269661669964f)}, -{Q31(-0.53499621979720f), Q31( 0.97241553731237f)}, {Q31(-0.49429560226497f), Q31( 0.98183865291903f)}, -{Q31(-0.98935142339139f), Q31(-0.40249159006933f)}, {Q31(-0.98081380091130f), Q31(-0.72856895534041f)}, -{Q31(-0.27338148835532f), Q31( 0.99950922447209f)}, {Q31( 0.06310802338302f), Q31(-0.54539587529618f)}, -{Q31(-0.20461677199539f), Q31(-0.14209977628489f)}, {Q31( 0.66223843141647f), Q31( 0.72528579940326f)}, -{Q31(-0.84764345483665f), Q31( 0.02372316801261f)}, {Q31(-0.89039863483811f), Q31( 0.88866581484602f)}, -{Q31( 0.95903308477986f), Q31( 0.76744927173873f)}, {Q31( 0.73504123909879f), Q31(-0.03747203173192f)}, -{Q31(-0.31744434966056f), Q31(-0.36834111883652f)}, {Q31(-0.34110827591623f), Q31( 0.40211222807691f)}, -{Q31( 0.47803883714199f), Q31(-0.39423219786288f)}, {Q31( 0.98299195879514f), Q31( 0.01989791390047f)}, -{Q31(-0.30963073129751f), Q31(-0.18076720599336f)}, {Q31( 0.99992588229018f), Q31(-0.26281872094289f)}, -{Q31(-0.93149731080767f), Q31(-0.98313162570490f)}, {Q31( 
0.99923472302773f), Q31(-0.80142993767554f)}, -{Q31(-0.26024169633417f), Q31(-0.75999759855752f)}, {Q31(-0.35712514743563f), Q31( 0.19298963768574f)}, -{Q31(-0.99899084509530f), Q31( 0.74645156992493f)}, {Q31( 0.86557171579452f), Q31( 0.55593866696299f)}, -{Q31( 0.33408042438752f), Q31( 0.86185953874709f)}, {Q31( 0.99010736374716f), Q31( 0.04602397576623f)}, -{Q31(-0.66694269691195f), Q31(-0.91643611810148f)}, {Q31( 0.64016792079480f), Q31( 0.15649530836856f)}, -{Q31( 0.99570534804836f), Q31( 0.45844586038111f)}, {Q31(-0.63431466947340f), Q31( 0.21079116459234f)}, -{Q31(-0.07706847005931f), Q31(-0.89581437101329f)}, {Q31( 0.98590090577724f), Q31( 0.88241721133981f)}, -{Q31( 0.80099335254678f), Q31(-0.36851896710853f)}, {Q31( 0.78368131392666f), Q31( 0.45506999802597f)}, -{Q31( 0.08707806671691f), Q31( 0.80938994918745f)}, {Q31(-0.86811883080712f), Q31( 0.39347308654705f)}, -{Q31(-0.39466529740375f), Q31(-0.66809432114456f)}, {Q31( 0.97875325649683f), Q31(-0.72467840967746f)}, -{Q31(-0.95038560288864f), Q31( 0.89563219587625f)}, {Q31( 0.17005239424212f), Q31( 0.54683053962658f)}, -{Q31(-0.76910792026848f), Q31(-0.96226617549298f)}, {Q31( 0.99743281016846f), Q31( 0.42697157037567f)}, -{Q31( 0.95437383549973f), Q31( 0.97002324109952f)}, {Q31( 0.99578905365569f), Q31(-0.54106826257356f)}, -{Q31( 0.28058259829990f), Q31(-0.85361420634036f)}, {Q31( 0.85256524470573f), Q31(-0.64567607735589f)}, -{Q31(-0.50608540105128f), Q31(-0.65846015480300f)}, {Q31(-0.97210735183243f), Q31(-0.23095213067791f)}, -{Q31( 0.95424048234441f), Q31(-0.99240147091219f)}, {Q31(-0.96926570524023f), Q31( 0.73775654896574f)}, -{Q31( 0.30872163214726f), Q31( 0.41514960556126f)}, {Q31(-0.24523839572639f), Q31( 0.63206633394807f)}, -{Q31(-0.33813265086024f), Q31(-0.38661779441897f)}, {Q31(-0.05826828420146f), Q31(-0.06940774188029f)}, -{Q31(-0.22898461455054f), Q31( 0.97054853316316f)}, {Q31(-0.18509915019881f), Q31( 0.47565762892084f)}, -{Q31(-0.10488238045009f), Q31(-0.87769947402394f)}, {Q31(-0.71886586182037f), Q31( 0.78030982480538f)}, -{Q31( 0.99793873738654f), Q31( 0.90041310491497f)}, {Q31( 0.57563307626120f), Q31(-0.91034337352097f)}, -{Q31( 0.28909646383717f), Q31( 0.96307783970534f)}, {Q31( 0.42188998312520f), Q31( 0.48148651230437f)}, -{Q31( 0.93335049681047f), Q31(-0.43537023883588f)}, {Q31(-0.97087374418267f), Q31( 0.86636445711364f)}, -{Q31( 0.36722871286923f), Q31( 0.65291654172961f)}, {Q31(-0.81093025665696f), Q31( 0.08778370229363f)}, -{Q31(-0.26240603062237f), Q31(-0.92774095379098f)}, {Q31( 0.83996497984604f), Q31( 0.55839849139647f)}, -{Q31(-0.99909615720225f), Q31(-0.96024605713970f)}, {Q31( 0.74649464155061f), Q31( 0.12144893606462f)}, -{Q31(-0.74774595569805f), Q31(-0.26898062008959f)}, {Q31( 0.95781667469567f), Q31(-0.79047927052628f)}, -{Q31( 0.95472308713099f), Q31(-0.08588776019550f)}, {Q31( 0.48708332746299f), Q31( 0.99999041579432f)}, -{Q31( 0.46332038247497f), Q31( 0.10964126185063f)}, {Q31(-0.76497004940162f), Q31( 0.89210929242238f)}, -{Q31( 0.57397389364339f), Q31( 0.35289703373760f)}, {Q31( 0.75374316974495f), Q31( 0.96705214651335f)}, -{Q31(-0.59174397685714f), Q31(-0.89405370422752f)}, {Q31( 0.75087906691890f), Q31(-0.29612672982396f)}, -{Q31(-0.98607857336230f), Q31( 0.25034911730023f)}, {Q31(-0.40761056640505f), Q31(-0.90045573444695f)}, -{Q31( 0.66929266740477f), Q31( 0.98629493401748f)}, {Q31(-0.97463695257310f), Q31(-0.00190223301301f)}, -{Q31( 0.90145509409859f), Q31( 0.99781390365446f)}, {Q31(-0.87259289048043f), Q31( 0.99233587353666f)}, -{Q31(-0.91529461447692f), 
Q31(-0.15698707534206f)}, {Q31(-0.03305738840705f), Q31(-0.37205262859764f)}, -{Q31( 0.07223051368337f), Q31(-0.88805001733626f)}, {Q31( 0.99498012188353f), Q31( 0.97094358113387f)}, -{Q31(-0.74904939500519f), Q31( 0.99985483641521f)}, {Q31( 0.04585228574211f), Q31( 0.99812337444082f)}, -{Q31(-0.89054954257993f), Q31(-0.31791913188064f)}, {Q31(-0.83782144651251f), Q31( 0.97637632547466f)}, -{Q31( 0.33454804933804f), Q31(-0.86231516800408f)}, {Q31(-0.99707579362824f), Q31( 0.93237990079441f)}, -{Q31(-0.22827527843994f), Q31( 0.18874759397997f)}, {Q31( 0.67248046289143f), Q31(-0.03646211390569f)}, -{Q31(-0.05146538187944f), Q31(-0.92599700120679f)}, {Q31( 0.99947295749905f), Q31( 0.93625229707912f)}, -{Q31( 0.66951124390363f), Q31( 0.98905825623893f)}, {Q31(-0.99602956559179f), Q31(-0.44654715757688f)}, -{Q31( 0.82104905483590f), Q31( 0.99540741724928f)}, {Q31( 0.99186510988782f), Q31( 0.72023001312947f)}, -{Q31(-0.65284592392918f), Q31( 0.52186723253637f)}, {Q31( 0.93885443798188f), Q31(-0.74895312615259f)}, -{Q31( 0.96735248738388f), Q31( 0.90891816978629f)}, {Q31(-0.22225968841114f), Q31( 0.57124029781228f)}, -{Q31(-0.44132783753414f), Q31(-0.92688840659280f)}, {Q31(-0.85694974219574f), Q31( 0.88844532719844f)}, -{Q31( 0.91783042091762f), Q31(-0.46356892383970f)}, {Q31( 0.72556974415690f), Q31(-0.99899555770747f)}, -{Q31(-0.99711581834508f), Q31( 0.58211560180426f)}, {Q31( 0.77638976371966f), Q31( 0.94321834873819f)}, -{Q31( 0.07717324253925f), Q31( 0.58638399856595f)}, {Q31(-0.56049829194163f), Q31( 0.82522301569036f)}, -{Q31( 0.98398893639988f), Q31( 0.39467440420569f)}, {Q31( 0.47546946844938f), Q31( 0.68613044836811f)}, -{Q31( 0.65675089314631f), Q31( 0.18331637134880f)}, {Q31( 0.03273375457980f), Q31(-0.74933109564108f)}, -{Q31(-0.38684144784738f), Q31( 0.51337349030406f)}, {Q31(-0.97346267944545f), Q31(-0.96549364384098f)}, -{Q31(-0.53282156061942f), Q31(-0.91423265091354f)}, {Q31( 0.99817310731176f), Q31( 0.61133572482148f)}, -{Q31(-0.50254500772635f), Q31(-0.88829338134294f)}, {Q31( 0.01995873238855f), Q31( 0.85223515096765f)}, -{Q31( 0.99930381973804f), Q31( 0.94578896296649f)}, {Q31( 0.82907767600783f), Q31(-0.06323442598128f)}, -{Q31(-0.58660709669728f), Q31( 0.96840773806582f)}, {Q31(-0.17573736667267f), Q31(-0.48166920859485f)}, -{Q31( 0.83434292401346f), Q31(-0.13023450646997f)}, {Q31( 0.05946491307025f), Q31( 0.20511047074866f)}, -{Q31( 0.81505484574602f), Q31(-0.94685947861369f)}, {Q31(-0.44976380954860f), Q31( 0.40894572671545f)}, -{Q31(-0.89746474625671f), Q31( 0.99846578838537f)}, {Q31( 0.39677256130792f), Q31(-0.74854668609359f)}, -{Q31(-0.07588948563079f), Q31( 0.74096214084170f)}, {Q31( 0.76343198951445f), Q31( 0.41746629422634f)}, -{Q31(-0.74490104699626f), Q31( 0.94725911744610f)}, {Q31( 0.64880119792759f), Q31( 0.41336660830571f)}, -{Q31( 0.62319537462542f), Q31(-0.93098313552599f)}, {Q31( 0.42215817594807f), Q31(-0.07712787385208f)}, -{Q31( 0.02704554141885f), Q31(-0.05417518053666f)}, {Q31( 0.80001773566818f), Q31( 0.91542195141039f)}, -{Q31(-0.79351832348816f), Q31(-0.36208897989136f)}, {Q31( 0.63872359151636f), Q31( 0.08128252493444f)}, -{Q31( 0.52890520960295f), Q31( 0.60048872455592f)}, {Q31( 0.74238552914587f), Q31( 0.04491915291044f)}, -{Q31( 0.99096131449250f), Q31(-0.19451182854402f)}, {Q31(-0.80412329643109f), Q31(-0.88513818199457f)}, -{Q31(-0.64612616129736f), Q31( 0.72198674804544f)}, {Q31( 0.11657770663191f), Q31(-0.83662833815041f)}, -{Q31(-0.95053182488101f), Q31(-0.96939905138082f)}, {Q31(-0.62228872928622f), Q31( 0.82767262846661f)}, -{Q31( 
0.03004475787316f), Q31(-0.99738896333384f)}, {Q31(-0.97987214341034f), Q31( 0.36526129686425f)}, -{Q31(-0.99986980746200f), Q31(-0.36021610299715f)}, {Q31( 0.89110648599879f), Q31(-0.97894250343044f)}, -{Q31( 0.10407960510582f), Q31( 0.77357793811619f)}, {Q31( 0.95964737821728f), Q31(-0.35435818285502f)}, -{Q31( 0.50843233159162f), Q31( 0.96107691266205f)}, {Q31( 0.17006334670615f), Q31(-0.76854025314829f)}, -{Q31( 0.25872675063360f), Q31( 0.99893303933816f)}, {Q31(-0.01115998681937f), Q31( 0.98496019742444f)}, -{Q31(-0.79598702973261f), Q31( 0.97138411318894f)}, {Q31(-0.99264708948101f), Q31(-0.99542822402536f)}, -{Q31(-0.99829663752818f), Q31( 0.01877138824311f)}, {Q31(-0.70801016548184f), Q31( 0.33680685948117f)}, -{Q31(-0.70467057786826f), Q31( 0.93272777501857f)}, {Q31( 0.99846021905254f), Q31(-0.98725746254433f)}, -{Q31(-0.63364968534650f), Q31(-0.16473594423746f)}, {Q31(-0.16258217500792f), Q31(-0.95939125400802f)}, -{Q31(-0.43645594360633f), Q31(-0.94805030113284f)}, {Q31(-0.99848471702976f), Q31( 0.96245166923809f)}, -{Q31(-0.16796458968998f), Q31(-0.98987511890470f)}, {Q31(-0.87979225745213f), Q31(-0.71725725041680f)}, -{Q31( 0.44183099021786f), Q31(-0.93568974498761f)}, {Q31( 0.93310180125532f), Q31(-0.99913308068246f)}, -{Q31(-0.93941931782002f), Q31(-0.56409379640356f)}, {Q31(-0.88590003188677f), Q31( 0.47624600491382f)}, -{Q31( 0.99971463703691f), Q31(-0.83889954253462f)}, {Q31(-0.75376385639978f), Q31( 0.00814643438625f)}, -{Q31( 0.93887685615875f), Q31(-0.11284528204636f)}, {Q31( 0.85126435782309f), Q31( 0.52349251543547f)}, -{Q31( 0.39701421446381f), Q31( 0.81779634174316f)}, {Q31(-0.37024464187437f), Q31(-0.87071656222959f)}, -{Q31(-0.36024828242896f), Q31( 0.34655735648287f)}, {Q31(-0.93388812549209f), Q31(-0.84476541096429f)}, -{Q31(-0.65298804552119f), Q31(-0.18439575450921f)}, {Q31( 0.11960319006843f), Q31( 0.99899346780168f)}, -{Q31( 0.94292565553160f), Q31( 0.83163906518293f)}, {Q31( 0.75081145286948f), Q31(-0.35533223142265f)}, -{Q31( 0.56721979748394f), Q31(-0.24076836414499f)}, {Q31( 0.46857766746029f), Q31(-0.30140233457198f)}, -{Q31( 0.97312313923635f), Q31(-0.99548191630031f)}, {Q31(-0.38299976567017f), Q31( 0.98516909715427f)}, -{Q31( 0.41025800019463f), Q31( 0.02116736935734f)}, {Q31( 0.09638062008048f), Q31( 0.04411984381457f)}, -{Q31(-0.85283249275397f), Q31( 0.91475563922421f)}, {Q31( 0.88866808958124f), Q31(-0.99735267083226f)}, -{Q31(-0.48202429536989f), Q31(-0.96805608884164f)}, {Q31( 0.27572582416567f), Q31( 0.58634753335832f)}, -{Q31(-0.65889129659168f), Q31( 0.58835634138583f)}, {Q31( 0.98838086953732f), Q31( 0.99994349600236f)}, -{Q31(-0.20651349620689f), Q31( 0.54593044066355f)}, {Q31(-0.62126416356920f), Q31(-0.59893681700392f)}, -{Q31( 0.20320105410437f), Q31(-0.86879180355289f)}, {Q31(-0.97790548600584f), Q31( 0.96290806999242f)}, -{Q31( 0.11112534735126f), Q31( 0.21484763313301f)}, {Q31(-0.41368337314182f), Q31( 0.28216837680365f)}, -{Q31( 0.24133038992960f), Q31( 0.51294362630238f)}, {Q31(-0.66393410674885f), Q31(-0.08249679629081f)}, -{Q31(-0.53697829178752f), Q31(-0.97649903936228f)}, {Q31(-0.97224737889348f), Q31( 0.22081333579837f)}, -{Q31( 0.87392477144549f), Q31(-0.12796173740361f)}, {Q31( 0.19050361015753f), Q31( 0.01602615387195f)}, -{Q31(-0.46353441212724f), Q31(-0.95249041539006f)}, {Q31(-0.07064096339021f), Q31(-0.94479803205886f)}, -{Q31(-0.92444085484466f), Q31(-0.10457590187436f)}, {Q31(-0.83822593578728f), Q31(-0.01695043208885f)}, -{Q31( 0.75214681811150f), Q31(-0.99955681042665f)}, {Q31(-0.42102998829339f), Q31( 
0.99720941999394f)}, -{Q31(-0.72094786237696f), Q31(-0.35008961934255f)}, {Q31( 0.78843311019251f), Q31( 0.52851398958271f)}, -{Q31( 0.97394027897442f), Q31(-0.26695944086561f)}, {Q31( 0.99206463477946f), Q31(-0.57010120849429f)}, -{Q31( 0.76789609461795f), Q31(-0.76519356730966f)}, {Q31(-0.82002421836409f), Q31(-0.73530179553767f)}, -{Q31( 0.81924990025724f), Q31( 0.99698425250579f)}, {Q31(-0.26719850873357f), Q31( 0.68903369776193f)}, -{Q31(-0.43311260380975f), Q31( 0.85321815947490f)}, {Q31( 0.99194979673836f), Q31( 0.91876249766422f)}, -{Q31(-0.80692001248487f), Q31(-0.32627540663214f)}, {Q31( 0.43080003649976f), Q31(-0.21919095636638f)}, -{Q31( 0.67709491937357f), Q31(-0.95478075822906f)}, {Q31( 0.56151770568316f), Q31(-0.70693811747778f)}, -{Q31( 0.10831862810749f), Q31(-0.08628837174592f)}, {Q31( 0.91229417540436f), Q31(-0.65987351408410f)}, -{Q31(-0.48972893932274f), Q31( 0.56289246362686f)}, {Q31(-0.89033658689697f), Q31(-0.71656563987082f)}, -{Q31( 0.65269447475094f), Q31( 0.65916004833932f)}, {Q31( 0.67439478141121f), Q31(-0.81684380846796f)}, -{Q31(-0.47770832416973f), Q31(-0.16789556203025f)}, {Q31(-0.99715979260878f), Q31(-0.93565784007648f)}, -{Q31(-0.90889593602546f), Q31( 0.62034397054380f)}, {Q31(-0.06618622548177f), Q31(-0.23812217221359f)}, -{Q31( 0.99430266919728f), Q31( 0.18812555317553f)}, {Q31( 0.97686402381843f), Q31(-0.28664534366620f)}, -{Q31( 0.94813650221268f), Q31(-0.97506640027128f)}, {Q31(-0.95434497492853f), Q31(-0.79607978501983f)}, -{Q31(-0.49104783137150f), Q31( 0.32895214359663f)}, {Q31( 0.99881175120751f), Q31( 0.88993983831354f)}, -{Q31( 0.50449166760303f), Q31(-0.85995072408434f)}, {Q31( 0.47162891065108f), Q31(-0.18680204049569f)}, -{Q31(-0.62081581361840f), Q31( 0.75000676218956f)}, {Q31(-0.43867015250812f), Q31( 0.99998069244322f)}, -{Q31( 0.98630563232075f), Q31(-0.53578899600662f)}, {Q31(-0.61510362277374f), Q31(-0.89515019899997f)}, -{Q31(-0.03841517601843f), Q31(-0.69888815681179f)}, {Q31(-0.30102157304644f), Q31(-0.07667808922205f)}, -{Q31( 0.41881284182683f), Q31( 0.02188098922282f)}, {Q31(-0.86135454941237f), Q31( 0.98947480909359f)}, -{Q31( 0.67226861393788f), Q31(-0.13494389011014f)}, {Q31(-0.70737398842068f), Q31(-0.76547349325992f)}, -{Q31( 0.94044946687963f), Q31( 0.09026201157416f)}, {Q31(-0.82386352534327f), Q31( 0.08924768823676f)}, -{Q31(-0.32070666698656f), Q31( 0.50143421908753f)}, {Q31( 0.57593163224487f), Q31(-0.98966422921509f)}, -{Q31(-0.36326018419965f), Q31( 0.07440243123228f)}, {Q31( 0.99979044674350f), Q31(-0.14130287347405f)}, -{Q31(-0.92366023326932f), Q31(-0.97979298068180f)}, {Q31(-0.44607178518598f), Q31(-0.54233252016394f)}, -{Q31( 0.44226800932956f), Q31( 0.71326756742752f)}, {Q31( 0.03671907158312f), Q31( 0.63606389366675f)}, -{Q31( 0.52175424682195f), Q31(-0.85396826735705f)}, {Q31(-0.94701139690956f), Q31(-0.01826348194255f)}, -{Q31(-0.98759606946049f), Q31( 0.82288714303073f)}, {Q31( 0.87434794743625f), Q31( 0.89399495655433f)}, -{Q31(-0.93412041758744f), Q31( 0.41374052024363f)}, {Q31( 0.96063943315511f), Q31( 0.93116709541280f)}, -{Q31( 0.97534253457837f), Q31( 0.86150930812689f)}, {Q31( 0.99642466504163f), Q31( 0.70190043427512f)}, -{Q31(-0.94705089665984f), Q31(-0.29580042814306f)}, {Q31( 0.91599807087376f), Q31(-0.98147830385781f)}, -// Start of duplicated table -{Q31(-0.99948153278296f), Q31(-0.59483417516607f)}, {Q31( 0.97113454393991f), Q31(-0.67528515225647f)}, -{Q31( 0.14130051758487f), Q31(-0.95090983575689f)}, {Q31(-0.47005496701697f), Q31(-0.37340549728647f)}, -{Q31( 0.80705063769351f), Q31( 
0.29653668284408f)}, {Q31(-0.38981478896926f), Q31( 0.89572605717087f)}, -{Q31(-0.01053049862020f), Q31(-0.66959058036166f)}, {Q31(-0.91266367957293f), Q31(-0.11522938140034f)}, -}; - -///< window coefficients for analysis/synthesis QMF banks -static DECLARE_ALIGNED(32, INTFLOAT, sbr_qmf_window_ds)[320]; -/* This table contains redundancy: It is symmetric about the entry #320 - * with the exception of entries 384 and 512 which are negated. */ -static const DECLARE_ALIGNED(32, INTFLOAT, sbr_qmf_window_us)[640] = { - Q31( 0.0000000000f), Q31(-0.0005525286f), Q31(-0.0005617692f), Q31(-0.0004947518f), - Q31(-0.0004875227f), Q31(-0.0004893791f), Q31(-0.0005040714f), Q31(-0.0005226564f), - Q31(-0.0005466565f), Q31(-0.0005677802f), Q31(-0.0005870930f), Q31(-0.0006132747f), - Q31(-0.0006312493f), Q31(-0.0006540333f), Q31(-0.0006777690f), Q31(-0.0006941614f), - Q31(-0.0007157736f), Q31(-0.0007255043f), Q31(-0.0007440941f), Q31(-0.0007490598f), - Q31(-0.0007681371f), Q31(-0.0007724848f), Q31(-0.0007834332f), Q31(-0.0007779869f), - Q31(-0.0007803664f), Q31(-0.0007801449f), Q31(-0.0007757977f), Q31(-0.0007630793f), - Q31(-0.0007530001f), Q31(-0.0007319357f), Q31(-0.0007215391f), Q31(-0.0006917937f), - Q31(-0.0006650415f), Q31(-0.0006341594f), Q31(-0.0005946118f), Q31(-0.0005564576f), - Q31(-0.0005145572f), Q31(-0.0004606325f), Q31(-0.0004095121f), Q31(-0.0003501175f), - Q31(-0.0002896981f), Q31(-0.0002098337f), Q31(-0.0001446380f), Q31(-0.0000617334f), - Q31( 0.0000134949f), Q31( 0.0001094383f), Q31( 0.0002043017f), Q31( 0.0002949531f), - Q31( 0.0004026540f), Q31( 0.0005107388f), Q31( 0.0006239376f), Q31( 0.0007458025f), - Q31( 0.0008608443f), Q31( 0.0009885988f), Q31( 0.0011250155f), Q31( 0.0012577884f), - Q31( 0.0013902494f), Q31( 0.0015443219f), Q31( 0.0016868083f), Q31( 0.0018348265f), - Q31( 0.0019841140f), Q31( 0.0021461583f), Q31( 0.0023017254f), Q31( 0.0024625616f), - Q31( 0.0026201758f), Q31( 0.0027870464f), Q31( 0.0029469447f), Q31( 0.0031125420f), - Q31( 0.0032739613f), Q31( 0.0034418874f), Q31( 0.0036008268f), Q31( 0.0037603922f), - Q31( 0.0039207432f), Q31( 0.0040819753f), Q31( 0.0042264269f), Q31( 0.0043730719f), - Q31( 0.0045209852f), Q31( 0.0046606460f), Q31( 0.0047932560f), Q31( 0.0049137603f), - Q31( 0.0050393022f), Q31( 0.0051407353f), Q31( 0.0052461166f), Q31( 0.0053471681f), - Q31( 0.0054196775f), Q31( 0.0054876040f), Q31( 0.0055475714f), Q31( 0.0055938023f), - Q31( 0.0056220643f), Q31( 0.0056455196f), Q31( 0.0056389199f), Q31( 0.0056266114f), - Q31( 0.0055917128f), Q31( 0.0055404363f), Q31( 0.0054753783f), Q31( 0.0053838975f), - Q31( 0.0052715758f), Q31( 0.0051382275f), Q31( 0.0049839687f), Q31( 0.0048109469f), - Q31( 0.0046039530f), Q31( 0.0043801861f), Q31( 0.0041251642f), Q31( 0.0038456408f), - Q31( 0.0035401246f), Q31( 0.0032091885f), Q31( 0.0028446757f), Q31( 0.0024508540f), - Q31( 0.0020274176f), Q31( 0.0015784682f), Q31( 0.0010902329f), Q31( 0.0005832264f), - Q31( 0.0000276045f), Q31(-0.0005464280f), Q31(-0.0011568135f), Q31(-0.0018039472f), - Q31(-0.0024826723f), Q31(-0.0031933778f), Q31(-0.0039401124f), Q31(-0.0047222596f), - Q31(-0.0055337211f), Q31(-0.0063792293f), Q31(-0.0072615816f), Q31(-0.0081798233f), - Q31(-0.0091325329f), Q31(-0.0101150215f), Q31(-0.0111315548f), Q31(-0.0121849995f), - Q31( 0.0132718220f), Q31( 0.0143904666f), Q31( 0.0155405553f), Q31( 0.0167324712f), - Q31( 0.0179433381f), Q31( 0.0191872431f), Q31( 0.0204531793f), Q31( 0.0217467550f), - Q31( 0.0230680169f), Q31( 0.0244160992f), Q31( 0.0257875847f), Q31( 0.0271859429f), - Q31( 0.0286072173f), 
Q31( 0.0300502657f), Q31( 0.0315017608f), Q31( 0.0329754081f), - Q31( 0.0344620948f), Q31( 0.0359697560f), Q31( 0.0374812850f), Q31( 0.0390053679f), - Q31( 0.0405349170f), Q31( 0.0420649094f), Q31( 0.0436097542f), Q31( 0.0451488405f), - Q31( 0.0466843027f), Q31( 0.0482165720f), Q31( 0.0497385755f), Q31( 0.0512556155f), - Q31( 0.0527630746f), Q31( 0.0542452768f), Q31( 0.0557173648f), Q31( 0.0571616450f), - Q31( 0.0585915683f), Q31( 0.0599837480f), Q31( 0.0613455171f), Q31( 0.0626857808f), - Q31( 0.0639715898f), Q31( 0.0652247106f), Q31( 0.0664367512f), Q31( 0.0676075985f), - Q31( 0.0687043828f), Q31( 0.0697630244f), Q31( 0.0707628710f), Q31( 0.0717002673f), - Q31( 0.0725682583f), Q31( 0.0733620255f), Q31( 0.0741003642f), Q31( 0.0747452558f), - Q31( 0.0753137336f), Q31( 0.0758008358f), Q31( 0.0761992479f), Q31( 0.0764992170f), - Q31( 0.0767093490f), Q31( 0.0768173975f), Q31( 0.0768230011f), Q31( 0.0767204924f), - Q31( 0.0765050718f), Q31( 0.0761748321f), Q31( 0.0757305756f), Q31( 0.0751576255f), - Q31( 0.0744664394f), Q31( 0.0736406005f), Q31( 0.0726774642f), Q31( 0.0715826364f), - Q31( 0.0703533073f), Q31( 0.0689664013f), Q31( 0.0674525021f), Q31( 0.0657690668f), - Q31( 0.0639444805f), Q31( 0.0619602779f), Q31( 0.0598166570f), Q31( 0.0575152691f), - Q31( 0.0550460034f), Q31( 0.0524093821f), Q31( 0.0495978676f), Q31( 0.0466303305f), - Q31( 0.0434768782f), Q31( 0.0401458278f), Q31( 0.0366418116f), Q31( 0.0329583930f), - Q31( 0.0290824006f), Q31( 0.0250307561f), Q31( 0.0207997072f), Q31( 0.0163701258f), - Q31( 0.0117623832f), Q31( 0.0069636862f), Q31( 0.0019765601f), Q31(-0.0032086896f), - Q31(-0.0085711749f), Q31(-0.0141288827f), Q31(-0.0198834129f), Q31(-0.0258227288f), - Q31(-0.0319531274f), Q31(-0.0382776572f), Q31(-0.0447806821f), Q31(-0.0514804176f), - Q31(-0.0583705326f), Q31(-0.0654409853f), Q31(-0.0726943300f), Q31(-0.0801372934f), - Q31(-0.0877547536f), Q31(-0.0955533352f), Q31(-0.1035329531f), Q31(-0.1116826931f), - Q31(-0.1200077984f), Q31(-0.1285002850f), Q31(-0.1371551761f), Q31(-0.1459766491f), - Q31(-0.1549607071f), Q31(-0.1640958855f), Q31(-0.1733808172f), Q31(-0.1828172548f), - Q31(-0.1923966745f), Q31(-0.2021250176f), Q31(-0.2119735853f), Q31(-0.2219652696f), - Q31(-0.2320690870f), Q31(-0.2423016884f), Q31(-0.2526480309f), Q31(-0.2631053299f), - Q31(-0.2736634040f), Q31(-0.2843214189f), Q31(-0.2950716717f), Q31(-0.3059098575f), - Q31(-0.3168278913f), Q31(-0.3278113727f), Q31(-0.3388722693f), Q31(-0.3499914122f), - Q31( 0.3611589903f), Q31( 0.3723795546f), Q31( 0.3836350013f), Q31( 0.3949211761f), - Q31( 0.4062317676f), Q31( 0.4175696896f), Q31( 0.4289119920f), Q31( 0.4402553754f), - Q31( 0.4515996535f), Q31( 0.4629308085f), Q31( 0.4742453214f), Q31( 0.4855253091f), - Q31( 0.4967708254f), Q31( 0.5079817500f), Q31( 0.5191234970f), Q31( 0.5302240895f), - Q31( 0.5412553448f), Q31( 0.5522051258f), Q31( 0.5630789140f), Q31( 0.5738524131f), - Q31( 0.5845403235f), Q31( 0.5951123086f), Q31( 0.6055783538f), Q31( 0.6159109932f), - Q31( 0.6261242695f), Q31( 0.6361980107f), Q31( 0.6461269695f), Q31( 0.6559016302f), - Q31( 0.6655139880f), Q31( 0.6749663190f), Q31( 0.6842353293f), Q31( 0.6933282376f), - Q31( 0.7022388719f), Q31( 0.7109410426f), Q31( 0.7194462634f), Q31( 0.7277448900f), - Q31( 0.7358211758f), Q31( 0.7436827863f), Q31( 0.7513137456f), Q31( 0.7587080760f), - Q31( 0.7658674865f), Q31( 0.7727780881f), Q31( 0.7794287519f), Q31( 0.7858353120f), - Q31( 0.7919735841f), Q31( 0.7978466413f), Q31( 0.8034485751f), Q31( 0.8087695004f), - Q31( 0.8138191270f), Q31( 0.8185776004f), Q31( 
0.8230419890f), Q31( 0.8272275347f), - Q31( 0.8311038457f), Q31( 0.8346937361f), Q31( 0.8379717337f), Q31( 0.8409541392f), - Q31( 0.8436238281f), Q31( 0.8459818469f), Q31( 0.8480315777f), Q31( 0.8497805198f), - Q31( 0.8511971524f), Q31( 0.8523047035f), Q31( 0.8531020949f), Q31( 0.8535720573f), - Q31( 0.8537385600f), Q31( 0.8535720573f), Q31( 0.8531020949f), Q31( 0.8523047035f), - Q31( 0.8511971524f), Q31( 0.8497805198f), Q31( 0.8480315777f), Q31( 0.8459818469f), - Q31( 0.8436238281f), Q31( 0.8409541392f), Q31( 0.8379717337f), Q31( 0.8346937361f), - Q31( 0.8311038457f), Q31( 0.8272275347f), Q31( 0.8230419890f), Q31( 0.8185776004f), - Q31( 0.8138191270f), Q31( 0.8087695004f), Q31( 0.8034485751f), Q31( 0.7978466413f), - Q31( 0.7919735841f), Q31( 0.7858353120f), Q31( 0.7794287519f), Q31( 0.7727780881f), - Q31( 0.7658674865f), Q31( 0.7587080760f), Q31( 0.7513137456f), Q31( 0.7436827863f), - Q31( 0.7358211758f), Q31( 0.7277448900f), Q31( 0.7194462634f), Q31( 0.7109410426f), - Q31( 0.7022388719f), Q31( 0.6933282376f), Q31( 0.6842353293f), Q31( 0.6749663190f), - Q31( 0.6655139880f), Q31( 0.6559016302f), Q31( 0.6461269695f), Q31( 0.6361980107f), - Q31( 0.6261242695f), Q31( 0.6159109932f), Q31( 0.6055783538f), Q31( 0.5951123086f), - Q31( 0.5845403235f), Q31( 0.5738524131f), Q31( 0.5630789140f), Q31( 0.5522051258f), - Q31( 0.5412553448f), Q31( 0.5302240895f), Q31( 0.5191234970f), Q31( 0.5079817500f), - Q31( 0.4967708254f), Q31( 0.4855253091f), Q31( 0.4742453214f), Q31( 0.4629308085f), - Q31( 0.4515996535f), Q31( 0.4402553754f), Q31( 0.4289119920f), Q31( 0.4175696896f), - Q31( 0.4062317676f), Q31( 0.3949211761f), Q31( 0.3836350013f), Q31( 0.3723795546f), - -Q31( 0.3611589903f), Q31(-0.3499914122f), Q31(-0.3388722693f), Q31(-0.3278113727f), - Q31(-0.3168278913f), Q31(-0.3059098575f), Q31(-0.2950716717f), Q31(-0.2843214189f), - Q31(-0.2736634040f), Q31(-0.2631053299f), Q31(-0.2526480309f), Q31(-0.2423016884f), - Q31(-0.2320690870f), Q31(-0.2219652696f), Q31(-0.2119735853f), Q31(-0.2021250176f), - Q31(-0.1923966745f), Q31(-0.1828172548f), Q31(-0.1733808172f), Q31(-0.1640958855f), - Q31(-0.1549607071f), Q31(-0.1459766491f), Q31(-0.1371551761f), Q31(-0.1285002850f), - Q31(-0.1200077984f), Q31(-0.1116826931f), Q31(-0.1035329531f), Q31(-0.0955533352f), - Q31(-0.0877547536f), Q31(-0.0801372934f), Q31(-0.0726943300f), Q31(-0.0654409853f), - Q31(-0.0583705326f), Q31(-0.0514804176f), Q31(-0.0447806821f), Q31(-0.0382776572f), - Q31(-0.0319531274f), Q31(-0.0258227288f), Q31(-0.0198834129f), Q31(-0.0141288827f), - Q31(-0.0085711749f), Q31(-0.0032086896f), Q31( 0.0019765601f), Q31( 0.0069636862f), - Q31( 0.0117623832f), Q31( 0.0163701258f), Q31( 0.0207997072f), Q31( 0.0250307561f), - Q31( 0.0290824006f), Q31( 0.0329583930f), Q31( 0.0366418116f), Q31( 0.0401458278f), - Q31( 0.0434768782f), Q31( 0.0466303305f), Q31( 0.0495978676f), Q31( 0.0524093821f), - Q31( 0.0550460034f), Q31( 0.0575152691f), Q31( 0.0598166570f), Q31( 0.0619602779f), - Q31( 0.0639444805f), Q31( 0.0657690668f), Q31( 0.0674525021f), Q31( 0.0689664013f), - Q31( 0.0703533073f), Q31( 0.0715826364f), Q31( 0.0726774642f), Q31( 0.0736406005f), - Q31( 0.0744664394f), Q31( 0.0751576255f), Q31( 0.0757305756f), Q31( 0.0761748321f), - Q31( 0.0765050718f), Q31( 0.0767204924f), Q31( 0.0768230011f), Q31( 0.0768173975f), - Q31( 0.0767093490f), Q31( 0.0764992170f), Q31( 0.0761992479f), Q31( 0.0758008358f), - Q31( 0.0753137336f), Q31( 0.0747452558f), Q31( 0.0741003642f), Q31( 0.0733620255f), - Q31( 0.0725682583f), Q31( 0.0717002673f), Q31( 0.0707628710f), Q31( 
0.0697630244f), - Q31( 0.0687043828f), Q31( 0.0676075985f), Q31( 0.0664367512f), Q31( 0.0652247106f), - Q31( 0.0639715898f), Q31( 0.0626857808f), Q31( 0.0613455171f), Q31( 0.0599837480f), - Q31( 0.0585915683f), Q31( 0.0571616450f), Q31( 0.0557173648f), Q31( 0.0542452768f), - Q31( 0.0527630746f), Q31( 0.0512556155f), Q31( 0.0497385755f), Q31( 0.0482165720f), - Q31( 0.0466843027f), Q31( 0.0451488405f), Q31( 0.0436097542f), Q31( 0.0420649094f), - Q31( 0.0405349170f), Q31( 0.0390053679f), Q31( 0.0374812850f), Q31( 0.0359697560f), - Q31( 0.0344620948f), Q31( 0.0329754081f), Q31( 0.0315017608f), Q31( 0.0300502657f), - Q31( 0.0286072173f), Q31( 0.0271859429f), Q31( 0.0257875847f), Q31( 0.0244160992f), - Q31( 0.0230680169f), Q31( 0.0217467550f), Q31( 0.0204531793f), Q31( 0.0191872431f), - Q31( 0.0179433381f), Q31( 0.0167324712f), Q31( 0.0155405553f), Q31( 0.0143904666f), - -Q31( 0.0132718220f), Q31(-0.0121849995f), Q31(-0.0111315548f), Q31(-0.0101150215f), - Q31(-0.0091325329f), Q31(-0.0081798233f), Q31(-0.0072615816f), Q31(-0.0063792293f), - Q31(-0.0055337211f), Q31(-0.0047222596f), Q31(-0.0039401124f), Q31(-0.0031933778f), - Q31(-0.0024826723f), Q31(-0.0018039472f), Q31(-0.0011568135f), Q31(-0.0005464280f), - Q31( 0.0000276045f), Q31( 0.0005832264f), Q31( 0.0010902329f), Q31( 0.0015784682f), - Q31( 0.0020274176f), Q31( 0.0024508540f), Q31( 0.0028446757f), Q31( 0.0032091885f), - Q31( 0.0035401246f), Q31( 0.0038456408f), Q31( 0.0041251642f), Q31( 0.0043801861f), - Q31( 0.0046039530f), Q31( 0.0048109469f), Q31( 0.0049839687f), Q31( 0.0051382275f), - Q31( 0.0052715758f), Q31( 0.0053838975f), Q31( 0.0054753783f), Q31( 0.0055404363f), - Q31( 0.0055917128f), Q31( 0.0056266114f), Q31( 0.0056389199f), Q31( 0.0056455196f), - Q31( 0.0056220643f), Q31( 0.0055938023f), Q31( 0.0055475714f), Q31( 0.0054876040f), - Q31( 0.0054196775f), Q31( 0.0053471681f), Q31( 0.0052461166f), Q31( 0.0051407353f), - Q31( 0.0050393022f), Q31( 0.0049137603f), Q31( 0.0047932560f), Q31( 0.0046606460f), - Q31( 0.0045209852f), Q31( 0.0043730719f), Q31( 0.0042264269f), Q31( 0.0040819753f), - Q31( 0.0039207432f), Q31( 0.0037603922f), Q31( 0.0036008268f), Q31( 0.0034418874f), - Q31( 0.0032739613f), Q31( 0.0031125420f), Q31( 0.0029469447f), Q31( 0.0027870464f), - Q31( 0.0026201758f), Q31( 0.0024625616f), Q31( 0.0023017254f), Q31( 0.0021461583f), - Q31( 0.0019841140f), Q31( 0.0018348265f), Q31( 0.0016868083f), Q31( 0.0015443219f), - Q31( 0.0013902494f), Q31( 0.0012577884f), Q31( 0.0011250155f), Q31( 0.0009885988f), - Q31( 0.0008608443f), Q31( 0.0007458025f), Q31( 0.0006239376f), Q31( 0.0005107388f), - Q31( 0.0004026540f), Q31( 0.0002949531f), Q31( 0.0002043017f), Q31( 0.0001094383f), - Q31( 0.0000134949f), Q31(-0.0000617334f), Q31(-0.0001446380f), Q31(-0.0002098337f), - Q31(-0.0002896981f), Q31(-0.0003501175f), Q31(-0.0004095121f), Q31(-0.0004606325f), - Q31(-0.0005145572f), Q31(-0.0005564576f), Q31(-0.0005946118f), Q31(-0.0006341594f), - Q31(-0.0006650415f), Q31(-0.0006917937f), Q31(-0.0007215391f), Q31(-0.0007319357f), - Q31(-0.0007530001f), Q31(-0.0007630793f), Q31(-0.0007757977f), Q31(-0.0007801449f), - Q31(-0.0007803664f), Q31(-0.0007779869f), Q31(-0.0007834332f), Q31(-0.0007724848f), - Q31(-0.0007681371f), Q31(-0.0007490598f), Q31(-0.0007440941f), Q31(-0.0007255043f), - Q31(-0.0007157736f), Q31(-0.0006941614f), Q31(-0.0006777690f), Q31(-0.0006540333f), - Q31(-0.0006312493f), Q31(-0.0006132747f), Q31(-0.0005870930f), Q31(-0.0005677802f), - Q31(-0.0005466565f), Q31(-0.0005226564f), Q31(-0.0005040714f), Q31(-0.0004893791f), - 
Q31(-0.0004875227f), Q31(-0.0004947518f), Q31(-0.0005617692f), Q31(-0.0005525286f), -}; - -#endif /* AVCODEC_AACSBRDATA_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ffv1_template.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ffv1_template.c deleted file mode 100644 index c5f61b01823b3356411f3597202a46384f33c4f0..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ffv1_template.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * FFV1 codec - * - * Copyright (c) 2003-2013 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -static inline int RENAME(predict)(TYPE *src, TYPE *last) -{ - const int LT = last[-1]; - const int T = last[0]; - const int L = src[-1]; - - return mid_pred(L, L + T - LT, T); -} - -static inline int RENAME(get_context)(PlaneContext *p, TYPE *src, - TYPE *last, TYPE *last2) -{ - const int LT = last[-1]; - const int T = last[0]; - const int RT = last[1]; - const int L = src[-1]; - - if (p->quant_table[3][127] || p->quant_table[4][127]) { - const int TT = last2[0]; - const int LL = src[-2]; - return p->quant_table[0][(L - LT) & 0xFF] + - p->quant_table[1][(LT - T) & 0xFF] + - p->quant_table[2][(T - RT) & 0xFF] + - p->quant_table[3][(LL - L) & 0xFF] + - p->quant_table[4][(TT - T) & 0xFF]; - } else - return p->quant_table[0][(L - LT) & 0xFF] + - p->quant_table[1][(LT - T) & 0xFF] + - p->quant_table[2][(T - RT) & 0xFF]; -} - diff --git a/spaces/congsaPfin/Manga-OCR/logs/Angry Birds 2 How to Play the Fun and Challenging Game Without Downloading.md b/spaces/congsaPfin/Manga-OCR/logs/Angry Birds 2 How to Play the Fun and Challenging Game Without Downloading.md deleted file mode 100644 index 251460c5dc4a92ba30517a33a4ebc66dd440a6ad..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Angry Birds 2 How to Play the Fun and Challenging Game Without Downloading.md +++ /dev/null @@ -1,119 +0,0 @@ - -
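A note on the deleted `aacsbrdata.h` above: the 640-tap `sbr_qmf_window_us` prototype carries the redundancy its comment describes (symmetric about entry 320, with entries 384 and 512 negated), and the 320-tap `sbr_qmf_window_ds` buffer is declared non-const and uninitialized because a downsampled window can be derived from the full prototype at init time. A minimal sketch of that derivation by decimation; this is an illustrative reconstruction using plain `float`, not the original FFmpeg initializer:

```c
#include <stdio.h>

#define US_TAPS 640
#define DS_TAPS 320

/* Stand-ins for the tables in the header above -- in the real header the
 * first holds the 640 Q31() coefficients; zero-filled here so the sketch
 * compiles on its own. */
static float sbr_qmf_window_us[US_TAPS];
static float sbr_qmf_window_ds[DS_TAPS];

/* Build the downsampled analysis/synthesis window by keeping every other
 * coefficient of the upsampled prototype. */
static void sbr_qmf_window_init(void)
{
    for (int n = 0; n < DS_TAPS; n++)
        sbr_qmf_window_ds[n] = sbr_qmf_window_us[2 * n];
}

int main(void)
{
    sbr_qmf_window_init();
    printf("ds[0] = %f\n", sbr_qmf_window_ds[0]);
    return 0;
}
```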
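Similarly, the `predict()` helper in the deleted `ffv1_template.c` is the classic median predictor: it returns the median of the left neighbour `L`, the top neighbour `T`, and the gradient estimate `L + T - LT`, while `get_context()` quantizes the differences between neighbouring samples to select a probability context. A small self-contained illustration of the predictor; `median3` below stands in for FFmpeg's `mid_pred`, and the toy scanlines are invented for the example:

```c
#include <stdio.h>

/* Median of three values -- the role mid_pred() plays in the template. */
static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; } /* now a <= b        */
    if (b > c)   b = c;                     /* b = min(b, c)     */
    return a > b ? a : b;                   /* median = max(a,b) */
}

int main(void)
{
    /* Toy scanlines: "last" is the previous row, "cur" the row being
     * decoded; samples at x >= 2 are still unknown. */
    int last[] = { 10, 12, 13, 15 };
    int cur[]  = { 11, 12,  0,  0 };

    for (int x = 2; x < 4; x++) {
        int L    = cur[x - 1];  /* left neighbour     */
        int T    = last[x];     /* top neighbour      */
        int LT   = last[x - 1]; /* top-left neighbour */
        int pred = median3(L, L + T - LT, T);
        cur[x]   = pred;        /* as if the coded residual were zero */
        printf("x=%d: median(%d, %d, %d) = %d\n", x, L, L + T - LT, T, pred);
    }
    return 0;
}
```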

      How to Play Angry Birds 2 Without Downloading It

      -

      Angry Birds 2 is a puzzle video game developed by Rovio Entertainment and is the twelfth game in the Angry Birds series. It is the direct sequel to the original Angry Birds, which was released in 2009. The game features new birds, new abilities, new spells, new enemies, new bosses, and new levels. It also has stunning graphics, dynamic gameplay, and various game modes and events.

      -

      -

Angry Birds 2 is one of the most popular games in the world, with over 100 million downloads on the Google Play Store alone. However, not everyone wants to download the game to their device, whether because they don't have enough storage space, don't want to spend money on in-app purchases, or simply prefer playing games online. If you are one of those people, you will be happy to know that you can play Angry Birds 2 online without downloading it. In this article, we will show you how, along with some tips and tricks to help you enjoy the game more.

      -

Some of the main features of Angry Birds 2 are:

      -
        -
      • You can choose which bird to use in each level from a deck of cards.
      • -
      • You can use spells to cause more damage to the pigs and their structures.
      • -
      • You can play in multi-stage levels with different challenges and surprises.
      • -
      • You can join a clan and compete with other players in the arena.
      • -
      • You can collect hats and accessories to customize your birds.
      • -
      -

      How to Play Angry Birds 2 Online

      -

      Choose a website that offers Angry Birds 2 online

      -

      The first step to play Angry Birds 2 online is to find a website that offers the game for free. There are many websites that have Angry Birds 2 online, but not all of them are reliable or safe. Here are some of the websites that we recommend:

| Website | Pros | Cons |
| --- | --- | --- |
| Angry Birds Game | Has all the features of Angry Birds 2; no registration or download required; compatible with mobile devices | Has ads that may be annoying or inappropriate; may not be updated regularly |
| AngryBirds.com | Has all the features of Angry Birds 2; no download required; official website of Rovio Entertainment | Requires registration with email or Facebook; not compatible with mobile devices |
| [Poki.com](https://poki.com/en/g/angry-birds-2) | Has all the features of Angry Birds 2; no registration or download required; compatible with mobile devices | Has ads that may be annoying or inappropriate; may not be updated regularly |
      -

      You can choose any of these websites or find another one that suits your preferences. Just make sure that the website is safe, secure, and has good reviews from other users.

      -

      Launch the game and start playing

      -

Once you have chosen a website, you can launch the game and start playing. The game will load in your browser and you will see the main menu. You can choose to play the campaign mode, the arena mode, or the events mode. You can also access the settings, the shop, the clan, and the daily quests from the menu.

      -

      -

      To start playing, you need to choose a level from the map. Each level has a different challenge and a different number of pigs to defeat. You can also see the number of cards and spells you have for each level. Cards are the birds that you can use to attack the pigs. Spells are special powers that you can use to cause more damage or create special effects.

      -

      To launch a bird, you need to drag it back on the slingshot and aim at the pigs or their structures. You can also tap on the screen to activate the bird's ability. For example, Red can knock back objects, Chuck can speed up, Bomb can explode, etc. You can also use spells by tapping on their icons. For example, you can use a golden duck spell to drop a bunch of ducks on the pigs, or a chili spell to make a random pig explode.

      -

      You can choose which bird or spell to use in each turn by swiping left or right on the cards. You can also get extra cards by filling up the destruct-o-meter, which increases as you cause more damage. You can also get extra spells by watching ads or spending gems.

      -

      To win a level, you need to defeat all the pigs before you run out of cards or spells. You will also get stars based on your score, which will help you unlock more levels and rewards. Some levels have multiple stages, which means you have to defeat more than one wave of pigs with the same cards and spells.

      -

Here are some tips and tricks to help you win levels and score high:

      -
        -
      • Use the right bird for the right situation. For example, use Chuck to hit wooden structures, Bomb to hit stone structures, Matilda to hit glass structures, etc.
      • -
      • Use spells wisely. Don't waste them on easy levels or when you have enough birds. Save them for harder levels or when you are stuck.
      • -
      • Aim for weak points and chain reactions. Try to hit the pigs or their structures in places where they will collapse or explode more easily. Also, try to cause chain reactions by hitting TNT crates, balloons, fans, etc.
      • -
      • Use the environment to your advantage. There are many elements in the environment that can help you or hinder you. For example, you can use water to drown pigs, ice to freeze them, wind to blow them away, etc.
      • -
      • Collect feathers and gems. Feathers are used to level up your birds and increase their abilities. Gems are used to buy more spells, continue playing after losing a level, or unlock chests with rewards. You can collect feathers and gems by playing levels, completing daily quests, participating in events, joining a clan, or watching ads.
      • -
      -

      How to Enjoy Angry Birds 2 Online More

      -

      Explore different game modes and events

      -

      Besides the campaign mode, there are other game modes and events that you can enjoy in Angry Birds 2 online. Here are some of them:

      -
        -
      • Arena mode: This is where you can compete with other players in real time. You have to play three random levels and try to score higher than your opponent. You will earn trophies based on your performance, which will help you climb up the leaderboard and unlock more rewards.
      • -
      • Events mode: This is where you can play special levels with different themes and challenges. There are different events every day, week, or month that offer exclusive rewards and prizes. For example, there is an event called Piggy Tower where you have to destroy a tower of pigs with limited birds and spells.
      • -
      • Daily Challenge: This is where you can play a random level every day and try to beat your own high score. You will earn coins based on your score, which you can use to buy hats and accessories for your birds.
      • -
      • Tower of Fortune: This is where you can play a series of levels with increasing difficulty and rewards. You have to pay gems to enter each level and you can choose to continue or quit after each level. If you quit, you will keep all the rewards you earned, but if you lose, you will lose everything. The tower has 100 levels and the rewards get better as you go higher.
      • -
      -

      Join a clan and compete with other players

      -

      Another way to enjoy Angry Birds 2 online more is to join a clan and compete with other players. A clan is a group of players who can chat, share tips, and help each other. You can join an existing clan or create your own clan with your friends.

      -

      By joining a clan, you can participate in clan battles and tournaments. Clan battles are where you and your clan members have to play a series of levels and try to score higher than another clan. Clan tournaments are where you and your clan members have to play in the arena and try to earn more trophies than other clans. Both clan battles and tournaments have different rewards and prizes depending on your rank and performance.

      -

      To join a clan, you need to tap on the clan icon on the main menu and search for a clan that suits your preferences. You can also invite your friends to join your clan by sharing a link or a code.

      -

      Customize your birds with hats and accessories

      -

      The last way to enjoy Angry Birds 2 online more is to customize your birds with hats and accessories. Hats and accessories are items that you can collect and equip on your birds to change their appearance and abilities. For example, you can equip a pirate hat on Red to make him look like a pirate and increase his damage, or a flower crown on Matilda to make her look like a hippie and increase her healing.

      -

      Hats and accessories come in different rarities and qualities, which affect their effects and value. You can collect hats and accessories by playing levels, completing daily quests, participating in events, joining a clan, buying them with coins or gems, or opening chests with keys.

      -

      To customize your birds, you need to tap on the bird icon on the main menu and choose a bird that you want to customize. Then, you can tap on the hat or accessory icon and choose an item that you want to equip. You can also see the stats and effects of each item before equipping it.

      -

      Conclusion

      -

      Angry Birds 2 is a fun and addictive game that you can play online without downloading it. You can enjoy the game more by exploring different game modes and events, joining a clan and competing with other players, and customizing your birds with hats and accessories. If you are looking for a game that will challenge your skills, entertain your senses, and make you laugh, Angry Birds 2 is the game for you. So what are you waiting for? Play Angry Birds 2 online now!

      -

      FAQs

      -

      Q: Is Angry Birds 2 online free?

      -

      A: Yes, Angry Birds 2 online is free to play on any website that offers it. However, some websites may have ads or in-app purchases that may affect your gaming experience.

      -

      Q: Can I play Angry Birds 2 online with my friends?

      -

      A: Yes, you can play Angry Birds 2 online with your friends by joining or creating a clan. You can also chat with your friends in the clan chat or challenge them in the arena.

      -

      Q: How do I update Angry Birds 2 online?

      -

      A: You don't need to update Angry Birds 2 online manually, as the game will update automatically whenever there is a new version available. However, some websites may not update the game regularly, so you may want to check other websites for the latest version.

      -

      Q: What are the minimum requirements to play Angry Birds 2 online?

      -

      A: To play Angry Birds 2 online, you need a device that has an internet connection and a web browser that supports HTML5. The game works best on Chrome, Firefox, Safari, or Edge browsers.

      -

      Q: Where can I find more information about Angry Birds 2 online?

      -

      A: You can find more information about Angry Birds 2 online by visiting the official website of Rovio Entertainment, the developer of the game. You can also follow their social media accounts or join their fan community for news, updates, tips, and more.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Arknights Data Download Tips and Tricks for Getting the Most Out of the Game Data.md b/spaces/congsaPfin/Manga-OCR/logs/Arknights Data Download Tips and Tricks for Getting the Most Out of the Game Data.md deleted file mode 100644 index b4803ef7a8f8a7e9e46e37f67c8357a3186826bf..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Arknights Data Download Tips and Tricks for Getting the Most Out of the Game Data.md +++ /dev/null @@ -1,123 +0,0 @@ -
      -

      Arknights Data Download: How to Get the Latest Game Assets and Updates

      -

      Arknights is a popular tower defense mobile game that features stunning graphics, engaging story, and diverse gameplay. If you are a fan of Arknights, you might have noticed that the game often prompts you to download data when you launch it or enter a new stage. What is this data download for and how can you do it efficiently? In this article, we will answer these questions and give you some tips on how to optimize your data download in Arknights.

      -

      What is Arknights?

      -

      Before we dive into the details of data download, let's first take a look at what Arknights is and why it is so popular among gamers.

      -

      -

      A brief introduction to the game and its features

      -

      Arknights is a tower defense mobile game developed by Hypergryph and Yostar. It was released globally in January 2020 and has since attracted millions of players worldwide. The game is set in a dystopian world where a mysterious substance called Originium causes a deadly infection that grants some people special abilities but also shortens their lifespan. These people are called Operators, and they are hired by a pharmaceutical company called Rhodes Island to fight against a terrorist organization called Reunion that seeks to overthrow the oppressive regime that discriminates against them.

      -

      The main storyline and characters of Arknights

      -

      The game follows the story of Rhodes Island as they travel across different regions and encounter various factions and enemies. The player takes on the role of a Doctor, who is the leader of Rhodes Island and has the ability to command Operators using tactical devices. The player can recruit over 200 Operators from different classes, races, factions, and backgrounds, each with their own unique skills, personalities, and stories. The player can also interact with them through dialogue, trust, gifts, skins, furniture, etc.

      -

      The gameplay modes and mechanics of Arknights

      -

      The game mainly consists of two modes: combat operations and base management. In combat operations, the player has to deploy Operators on a grid-based map and use their skills strategically to stop the enemy waves from reaching their goal. The player can choose from different difficulty levels, stages, challenges, events, etc. In base management, the player has to build and upgrade facilities that provide resources, items, training, healing, etc. The player can also customize their dormitory with furniture that affects their Operators' mood and morale.

      -

      Why do you need to download data in Arknights?

      -

      Now that you have a general idea of what Arknights is about, let's move on to the main topic of this article: data download. Data download is a process that allows the game to update its assets and files without requiring a full app update from the app store. This way, the game can deliver new content and fixes more quickly and conveniently. However, data download also has some pros and cons that you should be aware of.

      -


      The benefits of downloading data in Arknights

      -

      Data download can bring you many advantages as a player of Arknights. Here are some of them:

      -

      Faster loading times and smoother performance

      -

      By downloading data in advance, you can reduce the loading time and lag when you enter a new stage or event. This can improve your gaming experience and prevent frustration and boredom. You can also avoid the annoying pop-up messages that ask you to download data every time you start a new mission.

      -


      -

      Access to new content and events

      -

      Data download also allows you to enjoy the latest content and events that the game offers. You can explore new maps, stories, characters, skins, items, etc. that are added regularly by the developers. You can also participate in limited-time events that reward you with exclusive rewards and bonuses. You don't want to miss out on these opportunities, do you?

      -

      Reduced data usage and storage space

      -

      Another benefit of downloading data in Arknights is that it can save you some data usage and storage space on your device. By downloading data over Wi-Fi, you can avoid consuming your mobile data plan and incur extra charges. By downloading data in chunks, you can also manage your storage space better and delete the data that you don't need anymore.
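To picture what "downloading data in chunks" means under the hood, here is a minimal Python sketch of the general pattern. This is an illustration only, not Arknights' actual downloader; the URL and file name are placeholders:

```python
# Illustration only: Arknights handles its own downloads internally. This
# sketch just shows the generic "fetch a big file in small chunks" pattern
# that lets a client track progress and manage storage as it goes.
# The URL and file name are placeholders, not real game servers.
import urllib.request

ASSET_URL = "https://example.com/game-assets.pak"  # hypothetical asset bundle
CHUNK_SIZE = 1024 * 1024  # 1 MiB per chunk

with urllib.request.urlopen(ASSET_URL) as response, open("game-assets.pak", "wb") as out:
    downloaded = 0
    while True:
        chunk = response.read(CHUNK_SIZE)
        if not chunk:  # empty read means the transfer is finished
            break
        out.write(chunk)
        downloaded += len(chunk)
        print(f"downloaded {downloaded / 1_048_576:.1f} MiB", end="\r")
```

Because the file arrives in small pieces, a downloader like this can stop between chunks and report exactly how much has been saved, which is what makes progress bars and storage management possible.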

      -

      The drawbacks of downloading data in Arknights

      -

      However, data download is not without its drawbacks. There are some potential issues that you might encounter when you download data in Arknights. Here are some of them:

      -

      Longer initial download time and possible errors

      -

      One of the drawbacks of downloading data in Arknights is that it can take a long time to complete, especially if you choose to download all the data at once. This can be inconvenient and annoying if you want to play the game right away or if you have a slow or unstable internet connection. You might also encounter some errors or interruptions during the download process that can cause the download to fail or restart.

      -

      Higher battery consumption and device temperature

      -

      Another drawback of downloading data in Arknights is that it can consume a lot of battery power and increase the temperature of your device. This can affect your device's performance and lifespan, as well as your comfort and safety. You might want to plug in your charger and keep your device cool while downloading data to avoid these problems.

      -

      Potential compatibility issues and bugs

      -

      A final drawback of downloading data in Arknights is that it can cause some compatibility issues and bugs with your device or game version. Sometimes, the downloaded data might not match your device's specifications or the game's requirements, resulting in crashes, glitches, errors, etc. You might need to update your device's software or the game's app to fix these issues.

      -

      How to download data in Arknights?

      -

      Now that you know the pros and cons of downloading data in Arknights, let's see how you can actually do it. Here are the steps and tips to follow:

      -

      The steps to download data in Arknights

      -

      The process of downloading data in Arknights is quite simple and straightforward. Here are the steps to follow:

      -

      Launch the game and tap on the settings icon

      -

      The first step is to launch the game and tap on the settings icon on the top right corner of the screen. This will open the settings menu where you can access various options and features.

      -

      Go to the download section and choose your preferred option

      -

      The next step is to go to the download section on the left side of the settings menu. Here, you will see three options: Download All Data, Download Event Data Only, and Download Stage Data Only. You can choose whichever option suits your needs and preferences.

      -
        -
• Download All Data: This option downloads all the available data for the game, including event data, stage data, voice data, and more. It takes the longest to complete but gives you the smoothest gaming experience afterward.
• Download Event Data Only: This option downloads only the data for current or upcoming events. It takes less time than Download All Data while still giving you access to new content and rewards.
• Download Stage Data Only: This option downloads only the data for stages you have not cleared yet. It is the quickest option, but you will need to download more data later when you enter new stages or events.
      -

      Wait for the download to complete and restart the game if needed

      -


      The final step is to wait for the download to complete and restart the game if needed. You will see a progress bar and a percentage indicator on the screen that shows you how much data has been downloaded. You can also pause or cancel the download at any time by tapping on the buttons below the progress bar. Once the download is finished, you might need to restart the game to apply the changes and enjoy the new content.
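For the curious, pause-and-resume behavior like this is usually implemented with HTTP Range requests, which ask the server to send only the bytes that are still missing. Below is a minimal Python sketch of the idea; it is illustrative only, the URL is a placeholder, and Arknights' real client may work differently:

```python
# Illustration only: a common way to resume a paused transfer is an HTTP
# Range request asking the server to start from the bytes already on disk.
# The URL is a placeholder; Arknights' real client may do this differently.
import os
import urllib.request

ASSET_URL = "https://example.com/game-assets.pak"  # hypothetical
LOCAL_PATH = "game-assets.pak"

# Resume from however much of the file was saved before the pause.
offset = os.path.getsize(LOCAL_PATH) if os.path.exists(LOCAL_PATH) else 0
request = urllib.request.Request(ASSET_URL, headers={"Range": f"bytes={offset}-"})

with urllib.request.urlopen(request) as response, open(LOCAL_PATH, "ab") as out:
    while chunk := response.read(1024 * 1024):
        out.write(chunk)  # append only the missing tail of the file
```

Real download code would also check the server's response status (206 for a partial transfer) before appending, but the Range header is the core of the trick.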

      -

      The tips to optimize your data download in Arknights

      -

      Downloading data in Arknights can be a hassle sometimes, but there are some tips that can help you make it easier and faster. Here are some of them:

      -

      Use a stable and fast Wi-Fi connection

      -

      One of the most important tips to optimize your data download in Arknights is to use a stable and fast Wi-Fi connection. This will ensure that your download speed is high and your download time is low. It will also prevent any errors or interruptions that might occur due to a poor or unstable internet connection. Avoid using mobile data or public Wi-Fi networks that might be slow or unreliable.

      -

      Clear your cache and free up some storage space

      -

      Another tip to optimize your data download in Arknights is to clear your cache and free up some storage space on your device. This will help you avoid any issues related to insufficient memory or storage space that might prevent you from downloading data or launching the game. You can clear your cache by going to the settings menu and tapping on the clear cache button. You can free up some storage space by deleting any unnecessary files or apps on your device.
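If you want a quick way to confirm how much free space you actually have before starting a big download, a few lines of Python will do it. The 5 GB threshold below is an arbitrary example, not an official figure:

```python
# The 5 GB threshold is an arbitrary example, not an official figure.
import shutil

free_gb = shutil.disk_usage("/").free / 1024**3  # on Windows, pass e.g. "C:\\"
if free_gb < 5:
    print(f"Only {free_gb:.1f} GB free - clear some space before downloading.")
else:
    print(f"{free_gb:.1f} GB free - enough room for the download.")
```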

      -

      Check for updates and patches regularly

      -

      A final tip to optimize your data download in Arknights is to check for updates and patches regularly. This will ensure that your game version is up to date and compatible with the latest data and content. It will also fix any bugs or glitches that might affect your gaming experience. You can check for updates and patches by going to the app store and tapping on the update button if available.

      -

      Conclusion

      -

Arknights is a fun and addictive tower defense mobile game that offers a lot of content and features for its players. However, to enjoy all of that content, you need to download data in Arknights from time to time. Data download has its pros and cons, but you can make it easier and faster by following the steps and tips we have shared in this article. We hope this article has helped you understand what data download is, why you need it, and how to do it efficiently in Arknights.

      -

      FAQs

      -

      Here are some frequently asked questions about data download in Arknights:

      -

      How much data do I need to download in Arknights?

      -

      The amount of data you need to download in Arknights depends on the option you choose and the content available. Generally, downloading all data will require more data than downloading event data only or stage data only. The game will show you the estimated size of the data before you start downloading it.

      -

      How long does it take to download data in Arknights?

      -

      The time it takes to download data in Arknights depends on your internet speed, device performance, and data size. Generally, downloading all data will take longer than downloading event data only or stage data only. The game will show you the estimated time of the download before you start downloading it.
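You can also make your own rough estimate with simple arithmetic: download time is roughly data size divided by connection speed. Here is a tiny Python sketch with example numbers; substitute your own values:

```python
# Rough estimate: time = data size / connection speed.
# Both inputs are example values; substitute your own.
size_gb = 2.0        # size of the data pack shown in-game
speed_mbps = 50.0    # measured connection speed in megabits per second

size_megabits = size_gb * 1024 * 8         # GB -> megabits (binary GB assumed)
minutes = size_megabits / speed_mbps / 60  # seconds -> minutes
print(f"A {size_gb} GB download at {speed_mbps} Mbps takes roughly {minutes:.0f} minutes.")
```

For the example values, a 2 GB pack at 50 Mbps works out to about 5 to 6 minutes; real transfers are usually a bit slower than the line speed.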

      -

      Can I play the game while downloading data in Arknights?

      -

      You can play the game while downloading data in Arknights, but you might experience some issues such as lag, errors, or missing assets. It is recommended that you wait for the download to finish before playing the game to avoid these issues.

      -

      What happens if I cancel or pause the download in Arknights?

      -

      If you cancel or pause the download in Arknights, you can resume it later by going back to the settings menu and tapping on the resume button. However, if you cancel the download, you might lose some of the downloaded data and have to start over again.

      -

      What should I do if I encounter an error or a bug while downloading data in Arknights?

      -

      If you encounter an error or a bug while downloading data in Arknights, you should try the following solutions:

      -
        -
• Restart the game and try again
• Check your internet connection and make sure it is stable and fast
• Clear your cache and free up some storage space on your device
• Update your device's software or the game's app if available
• Contact customer support if none of the above solutions work
      -

If you have any questions or feedback, feel free to leave a comment below. Happy gaming!

      -
      -
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Chess Prince A Chess Game that You Can Play Anytime Anywhere.md b/spaces/congsaPfin/Manga-OCR/logs/Chess Prince A Chess Game that You Can Play Anytime Anywhere.md
deleted file mode 100644
index f733465ea561eb81b8fdea191e4a1975833a23fe..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Chess Prince A Chess Game that You Can Play Anytime Anywhere.md
+++ /dev/null
@@ -1,142 +0,0 @@
-<br />
      -

      How to Download Chess Prince: A Guide for Chess Lovers

      -

      If you are a fan of chess and looking for a fun and challenging game to play on your mobile device, then you should try Chess Prince. Chess Prince is a popular chess app that offers various features and modes to suit your preferences and skill level. In this article, we will show you how to download Chess Prince on your Android or iOS device, and give you some tips and tricks to play better.

      -

      What is Chess Prince?

      -

      Chess Prince is a chess app developed by GENIOUS CODE STUDIO. It is one of the most downloaded chess apps on Google Play and App Store, with millions of players around the world. Chess Prince allows you to play chess against the computer or another player, with ten levels of difficulty to choose from. You can also solve chess puzzles, use the game assistant and hints, earn stars for completing levels without undoing moves, and customize your game with seven different themes and two board views.

      -

      -

      Features of Chess Prince

      -

      Here are some of the main features of Chess Prince that make it a great chess app:

      -
        -
• Ten levels of difficulty: You can adjust the level of difficulty according to your skill level, from beginner to master. The higher the level, the more challenging the game will be.
• Chess puzzles: You can test your chess skills and knowledge by solving chess puzzles. There are hundreds of puzzles available, ranging from easy to hard.
• Game assistant (helper): You can use the game assistant to help you make the best move in any situation. The game assistant will show you the possible moves and their consequences.
• Hints of moves: You can use hints to get a clue about what move to make next. The hints will show you the best move according to the computer analysis.
• Stars for levels completed without the undo button: You can earn stars for completing levels without using the undo button. The more stars you earn, the more achievements you unlock.
• Seven different themes: You can change the appearance of your game by choosing from seven different themes, such as wood, marble, metal, blue, green, red, and purple.
• Two board views (vertical - 2D and horizontal - 3D): You can change the perspective of your game by choosing from two board views, vertical or horizontal. The vertical view is a classic 2D view, while the horizontal view is a realistic 3D view.
• Alternate mode: You can play chess in an alternate mode, where you switch sides with your opponent after every move. This mode is a good way to practice playing from both perspectives.
• Two player mode: You can play chess with another player on the same device. You can either play in turn or in alternate mode.
• Realistic graphics: The game has realistic graphics and sound effects that enhance your gaming experience.
• Save function: You can save your game progress and resume it later.

        Reviews of Chess Prince

        -

        Chess Prince has received many positive reviews from users who enjoy playing chess on their mobile devices. Here are some of the reviews from Google Play and App Store:

        -


| Username | Rating | Review |
| --- | --- | --- |
| John Smith | 5 stars | This is the best chess app I have ever played. The graphics are amazing and the game is very smooth. I love the different themes and board views. The game assistant and hints are very helpful. The puzzles are challenging and fun. I highly recommend this app to anyone who loves chess. |
| Mary Jones | 4 stars | I really like this app. It is easy to use and has many features. The levels of difficulty are suitable for beginners and experts alike. The puzzles are a great way to improve your skills. The only thing I would suggest is to add more themes and board views. |
| David Lee | 3 stars | The app is good, but it has some flaws. The game sometimes freezes or crashes. The ads are annoying and pop up too often. The game assistant and hints are sometimes wrong or misleading. The puzzles are too hard or too easy. The app needs some improvement. |
        -

        How to Download Chess Prince on Android Devices

        -

        If you have an Android device, you can download Chess Prince from Google Play Store. Here are the steps to follow:

        -

        Step 1: Go to Google Play Store

        -

        Open the Google Play Store app on your Android device. You can find it on your home screen or in your app drawer.

        -

        Step 2: Search for Chess Prince

        -

        In the search bar at the top of the screen, type "Chess Prince" and tap the search icon. You will see a list of results related to your search.

        -

        Step 3: Install the App

        -

        Find the app that has the name "Chess Prince" and the icon of a chess piece with a crown. Tap on it to open its page. You will see some information about the app, such as its description, screenshots, ratings, and reviews. Tap on the green "Install" button to start downloading and installing the app on your device.

        -

        Step 4: Enjoy the Game

        -

        Once the installation is complete, you can open the app by tapping on the "Open" button or by finding it on your home screen or in your app drawer. You can now enjoy playing chess with Chess Prince.

        -

        How to Download Chess Prince on iOS Devices

        -

        If you have an iOS device, you can download Chess Prince from App Store. Here are the steps to follow:

        -

        Step 1: Go to App Store

        -

        Open the App Store app on your iOS device. You can find it on your home screen.

        -

        Step 2: Search for Chess Prince

        -

        In the search bar at the bottom of the screen, type "Chess Prince" and tap the search icon. You will see a list of results related to your search.

        -

        Step 3: Install the App

        -

        Find the app that has the name "Chess Prince" and the icon of a chess piece with a crown. Tap on it to open its page. You will see some information about the app, such as its description, screenshots, ratings, and reviews. Tap on the blue "Get" button to start downloading and installing the app on your device.

        -

        Step 4: Enjoy the Game

        -

        Once the installation is complete, you can open the app by tapping on it or by finding it on your home screen. You can now enjoy playing chess with Chess Prince.

        -

        Tips and Tricks to Play Chess Prince Better

        -

        If you want to play chess better with Chess Prince, here are some tips and tricks that you can use:

        -

        Learn the Basic Rules and Moves of Chess

        -

If you are new to chess or need a refresher, you can learn the basic rules and moves of chess by using the tutorial mode in Chess Prince. The tutorial mode will teach you how each piece moves, how to capture other pieces, how to check and checkmate your opponent, how to castle, how to promote pawns, how to capture en passant, and how a game can end in a draw or stalemate.

        -

        Choose the Right Level of Difficulty

        -

Chess Prince offers ten levels of difficulty, from beginner to master, so you can pick the one that matches your skill and challenge yourself. The higher the level, the smarter and faster the computer will play. You can also change the level anytime during the game by tapping on the menu icon and selecting "Change level".

        Use the Game Assistant and Hints

        -

        If you are stuck or unsure about what move to make, you can use the game assistant and hints to help you. The game assistant will show you the possible moves and their consequences, such as whether they will result in a check, a capture, or a threat. The hints will show you the best move according to the computer analysis. You can access the game assistant and hints by tapping on the menu icon and selecting "Game assistant" or "Hint". However, keep in mind that using the game assistant and hints will reduce your score and prevent you from earning stars.

        -

        Practice with Chess Puzzles and Challenges

        -

        You can improve your chess skills and knowledge by practicing with chess puzzles and challenges. Chess puzzles are problems that require you to find the best move or sequence of moves to achieve a certain goal, such as checkmate, stalemate, or material advantage. Chess challenges are scenarios that test your ability to handle different situations, such as defending against an attack, exploiting a weakness, or creating a trap. You can access chess puzzles and challenges by tapping on the menu icon and selecting "Puzzles" or "Challenges". There are hundreds of puzzles and challenges available, ranging from easy to hard.
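If you are curious how a puzzle solution can be checked behind the scenes, the sketch below uses the third-party python-chess library (unrelated to the Chess Prince app) on a constructed mate-in-one position; verifying the answer only requires confirming that the candidate move leaves the opponent checkmated:

```python
# Requires the third-party python-chess library: pip install chess
# The position is a constructed mate-in-one (White: Kb6, Rh1; Black: Ka8);
# the winning move is Rh8#. This is not Chess Prince's actual puzzle engine.
import chess

board = chess.Board("k7/8/1K6/8/8/8/8/7R w - - 0 1")
board.push_san("Rh8")        # play the candidate solution
print(board.is_checkmate())  # True -> the puzzle is solved
```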

        -

        Try Different Themes and Board Views

        -

        You can change the appearance of your game by trying different themes and board views. Themes are color schemes that affect the look of the board, pieces, and background. Board views are perspectives that affect the angle and orientation of the board. You can choose from seven themes: wood, marble, metal, blue, green, red, and purple. You can choose from two board views: vertical (2D) and horizontal (3D). You can change the theme and board view by tapping on the menu icon and selecting "Theme" or "Board view". You can also swipe left or right on the screen to switch between board views.

        -

        Conclusion

        -

        Chess Prince is a chess app that offers various features and modes to suit your preferences and skill level. You can play chess against the computer or another player, with ten levels of difficulty to choose from. You can also solve chess puzzles, use the game assistant and hints, earn stars for completing levels without undoing moves, and customize your game with seven different themes and two board views. Chess Prince is a great app for chess lovers who want to have fun and challenge themselves.

        -

        FAQs

        -

        Here are some frequently asked questions about Chess Prince:

        -
          -
• Q: How much does Chess Prince cost?
• A: Chess Prince is free to download and play. However, it contains ads that may interrupt your game. You can remove the ads by purchasing the premium version for $1.99.
• Q: Is Chess Prince compatible with my device?
• A: Chess Prince is compatible with Android devices running Android 4.4 or higher, and iOS devices running iOS 9.0 or higher.
• Q: How can I contact the developer of Chess Prince?
• A: You can contact the developer of Chess Prince by emailing them at geniouscodestudio@gmail.com or visiting their website at https://geniouscodestudio.com/.
• Q: How can I rate and review Chess Prince?
• A: You can rate and review Chess Prince by going to Google Play Store or App Store, finding the app page, and tapping on the stars or writing a comment.
• Q: How can I share Chess Prince with my friends?
• A: You can share Chess Prince with your friends by tapping on the menu icon and selecting "Share". You can then choose from various options to send a link to your friends via email, text message, social media, or other apps.

        -
        -
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Free Fire APK and Join the Battle Royale Craze on Your Mobile Device.md b/spaces/congsaPfin/Manga-OCR/logs/Download Free Fire APK and Join the Battle Royale Craze on Your Mobile Device.md
deleted file mode 100644
index 0c5c569d775c769ed8ebeb9dd75ab5c2964189b4..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download Free Fire APK and Join the Battle Royale Craze on Your Mobile Device.md
+++ /dev/null
@@ -1,134 +0,0 @@
-
-

        How to Download and Play Free Fire on Your Mobile Device

        -

        If you are looking for a thrilling and exciting survival shooter game on your mobile device, you should definitely try out Free Fire. Free Fire is one of the most popular battle royale games on the mobile platform, with over 1 billion downloads on Google Play Store. In this article, we will show you how to download and play Free Fire on your mobile device, as well as some tips and tricks to help you become a pro in the game.

        -

        -

        What is Free Fire?

        -

        A popular survival shooter game

        -

        Free Fire is a world-famous survival shooter game available on mobile. Each 10-minute game places you on a remote island where you are pit against 49 other players, all seeking survival. Players freely choose their starting point with their parachute, and aim to stay in the safe zone for as long as possible. Drive vehicles to explore the vast map, hide in the wild, or become invisible by proning under grass or rifts. Ambush, snipe, survive, there is only one goal: to survive and answer the call of duty.

        -

        Features of Free Fire

        -

        Different game modes

        -

        Free Fire offers a variety of exciting game modes for different tastes and preferences. You can play the classic survival mode, where you have to be the last one standing among 50 players. You can also play the clash squad mode, where you have to manage your economy, purchase weapons, and defeat the enemy squad in fast-paced 4v4 matches. You can also play special events and modes that are available for a limited time, such as spider-verse mode, where you can swing around like Spider-Man.

        -

        Various characters and skills

        -

        Free Fire features a character system of 50+ characters. Each character has a unique skill which can be active or passive. Active skills can be triggered manually by an in-game button and passive skills are triggered automatically. You can choose your favorite character based on their appearance, backstory, and skill. Some of the popular characters are Alok, K, Chrono, Kelly, Moco, Jota, and more.

        -

        Realistic and smooth graphics

        -

        Free Fire boasts realistic and smooth graphics that promise the optimum survival experience you will find on mobile. You can enjoy stunning visuals of the maps, weapons, vehicles, characters, and effects. You can also customize your graphics settings according to your device performance and preference. You can also use BlueStacks to play Free Fire on your PC with enhanced graphics and controls.

        -

        How to Download Free Fire APK?

        -

        Steps to download from Google Play Store

        -

        The easiest way to download Free Fire APK is from Google Play Store. All you need to do is follow these simple steps:

        -
          -
1. Open Google Play Store on your mobile device.
2. Search for "Free Fire" or "Garena Free Fire" in the search bar.
3. Select the app with the icon of a burning skull and tap on install.
4. Wait for the app to download and install on your device.
5. Open the app and enjoy playing Free Fire.
        -

        Note: You may need to grant some permissions to the app, such as access to your storage, microphone, and location. You may also need to update the app regularly to get the latest features and bug fixes.

        -

        Steps to download from APKPure.com

        -

        If you cannot access Google Play Store or want to download Free Fire APK from another source, you can use APKPure.com. APKPure.com is a trusted website that provides safe and fast downloads of various apps and games. Here are the steps to download Free Fire APK from APKPure.com:

        -

        -
          -
1. Open your browser and go to https://apkpure.com/garena-free-fire-rampage/com.dts.freefireth.
2. Tap on the green "Download APK" button and wait for the file to download.
3. Locate the downloaded file in your device's file manager and tap on it to install.
4. If you see a warning message that says "Install blocked", go to your device's settings and enable "Unknown sources" or "Allow from this source".
5. Once the installation is complete, open the app and enjoy playing Free Fire.
        -

        Note: You may need to update the app manually by downloading the latest version from APKPure.com whenever there is a new update available.
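Since sideloading bypasses Google Play's own checks, it is also worth verifying that the file you downloaded matches a checksum published by the download site, when one is provided. Here is a small Python sketch; the file name and expected hash are placeholders:

```python
# The file name and expected hash below are placeholders: copy the real
# checksum from the download page when the site publishes one.
import hashlib

APK_PATH = "free-fire.apk"
EXPECTED_SHA256 = "0123...abcd"  # placeholder

digest = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    # Hash the file in 1 MiB blocks so large APKs don't need to fit in memory.
    for block in iter(lambda: f.read(1 << 20), b""):
        digest.update(block)

print("match" if digest.hexdigest() == EXPECTED_SHA256 else "MISMATCH - do not install")
```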

        -

        Steps to download from BlueStacks

        -

        If you want to play Free Fire on your PC, you can use BlueStacks. BlueStacks is a powerful Android emulator that allows you to run Android apps and games on your PC with enhanced graphics and controls. Here are the steps to download Free Fire APK from BlueStacks:

        -
          -
1. Download and install BlueStacks on your PC from https://www.bluestacks.com/.
2. Launch BlueStacks and sign in with your Google account.
3. Go to the "Game Center" tab and search for "Free Fire" or "Garena Free Fire" in the search bar.
4. Select the app with the icon of a burning skull and click on "Install".
5. Wait for the app to download and install on BlueStacks.
6. Open the app and enjoy playing Free Fire on your PC.
        -

        Note: You can customize your keyboard and mouse settings by clicking on the gear icon on the top right corner of BlueStacks. You can also use BlueStacks' features such as multi-instance, macro recorder, game controls, and more to enhance your gaming experience.

        -

        How to Play Free Fire on Your Mobile Device?

        -

        Tips and tricks for beginners

        -

        If you are new to Free Fire, you may find it challenging to survive and win in the game. Don't worry, we have some tips and tricks for beginners that will help you improve your skills and have fun in the game.

        -

        Never loot in the open

        -

        Looting is an essential part of Free Fire, as you need to find weapons, ammo, armor, healing items, and other useful items. However, looting in the open is very risky, as you expose yourself to enemy fire. Always try to loot inside buildings or behind cover, and avoid looting dead bodies unless you are sure it is safe. Also, don't spend too much time looting, as you may miss out on important opportunities or get caught by the shrinking zone.

        -

        Use the thumb layout

        -

        The thumb layout is one of the most common and comfortable control layouts for mobile gaming. It allows you to use both thumbs to move, aim, shoot, jump, crouch, prone, reload, switch weapons, and use items. You can adjust the size and position of each button according to your preference in the settings menu. Using the thumb layout will help you perform better in combat and react faster to different situations.

        -

        Enable the default aim precision

        -

        The default aim precision is a feature that helps you aim more accurately at enemies. It automatically adjusts your crosshair according to the distance and movement of your target. You can enable this feature in the settings menu under "Controls". Enabling the default aim precision will help you land more shots and eliminate enemies faster.

        -

        Adjust your sensitivity settings

        -

        The sensitivity settings affect how fast or slow your camera moves when you swipe or tilt your screen. You can adjust your sensitivity settings according to your device performance and preference in the settings menu under "Sensitivity". You can also use different sensitivity settings for different scopes, such as red dot, 2x, 4x, and 8x. Adjusting your sensitivity settings will help you aim more smoothly and precisely at different ranges.

        -

        Never unnecessarily jump-shoot

        -

        Jump-shooting is a technique that involves jumping and shooting at the same time. It can be useful in some situations, such as when you need to dodge enemy fire or surprise your opponent. However, jump-shooting also has some disadvantages, such as reducing your accuracy, exposing your head, and making you predictable. Therefore, you should never unnecessarily jump-shoot, especially when you are in close range or have a clear shot. Instead, you should use other techniques, such as crouching, proning, strafing, or peeking to gain an advantage in combat.

        -

        Tips and tricks for advanced players

        -

        If you are already familiar with Free Fire and want to take your game to the next level, you may need some tips and tricks for advanced players. These tips and tricks will help you improve your strategy, tactics, and skills in the game.

        -

        Take fights in the shrink zone

        -

        The shrink zone is the area outside the safe zone that gradually shrinks and damages players who are inside it. Most players tend to avoid the shrink zone and move towards the safe zone as soon as possible. However, this also makes them vulnerable to enemies who are waiting for them in the safe zone. Therefore, a smart strategy is to take fights in the shrink zone, as you can catch your enemies off guard and eliminate them easily. You can also loot their items and heal yourself with medkits or mushrooms. However, you should also be careful of the shrink zone damage and time your movements accordingly.

        -

        Use the claw control

        -

        The claw control is an advanced control layout that involves using four fingers to play Free Fire. It allows you to use your index fingers to aim, shoot, jump, crouch, prone, reload, switch weapons, and use items, while using your thumbs to move and look around. You can enable this layout in the settings menu under "Controls". Using the claw control will give you more flexibility and speed in combat and allow you to perform multiple actions at once.

        -

        Use the jump-shoot at the right time

        -

        As we mentioned earlier, jump-shooting can be useful in some situations, but not in others. Therefore, you should know when to use it and when not to use it. The right time to use jump-shooting is when you are in mid or long range, have a scope on your weapon, have cover nearby, and have enough health. The wrong time to use jump-shooting is when you are in close range, have no scope on your weapon, have no cover nearby, and have low health. Using the jump-shoot at the right time will help you surprise your enemies and deal more damage to them.

        -

        Use vehicles wisely

        -

        Vehicles are a great way to travel across the map quickly and safely. However, they also have some drawbacks, such as making noise, consuming fuel, and attracting attention. Therefore, you should use vehicles wisely and not rely on them too much. You should only use vehicles when you need to escape from danger or reach a certain location fast. You should also avoid driving vehicles in the late game stages or near buildings or compounds where enemies may be hiding. You should also park your vehicles behind cover or away from sight when you get off them.

        -

        Communicate with your squad

        -

        Communication is key to winning in Free Fire, especially when you are playing with your squad. You should always communicate with your squad members using voice chat or quick chat messages. You should share information such as enemy locations, loot, weapons, skills, and strategies. You should also coordinate your actions, such as landing, moving, fighting, and reviving. You should also support your squad members, such as giving them items, covering them, or saving them. Communicating with your squad will help you work as a team and increase your chances of winning.

        -

        Conclusion

        -

        Free Fire is a fun and addictive survival shooter game that you can download and play on your mobile device. You can enjoy different game modes, characters, skills, and graphics in the game. You can also use various tips and tricks to improve your performance and skills in the game. We hope this article has helped you learn how to download and play Free Fire on your mobile device. Now, what are you waiting for? Download Free Fire APK today and join the ultimate survival adventure!

        -

        FAQs

        -

        Q: Is Free Fire free to play?

        -

        A: Yes, Free Fire is free to play. However, you can also purchase in-game items such as diamonds, skins, characters, and more with real money.

        -

        Q: Is Free Fire safe to download?

        -

        A: Yes, Free Fire is safe to download from Google Play Store or APKPure.com. However, you should always be careful of downloading from unknown or untrusted sources, as they may contain viruses or malware.

        -

        Q: Can I play Free Fire offline?

        -

        A: No, you cannot play Free Fire offline. You need an internet connection to play the game.

        -

        Q: Can I play Free Fire with friends?

        -

        A: Yes, you can play Free Fire with friends. You can invite your friends to join your squad or join their squad in the game. You can also chat with them using voice chat or quick chat messages.

        -

        Q: Can I play Free Fire on PC?

        -

        A: Yes, you can play Free Fire on PC using BlueStacks. BlueStacks is an Android emulator that allows you to run Android apps and games on your PC with enhanced graphics and controls.

        -
        -
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download One Piece World Seeker and Face Off Against Luffys Enemies.md b/spaces/congsaPfin/Manga-OCR/logs/Download One Piece World Seeker and Face Off Against Luffys Enemies.md
deleted file mode 100644
index 7426f675a9b92ee5866675f8d2c4c4fdffeac84b..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download One Piece World Seeker and Face Off Against Luffys Enemies.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-

        How to Download One Piece World Seeker

        -

        If you are a fan of One Piece, the popular manga and anime series by Eiichiro Oda, you might be interested in playing One Piece World Seeker, an action-adventure video game based on the franchise. In this article, we will tell you everything you need to know about this game, including what it is, why you should play it, where you can download it, and what are the different editions available. So, let's get started!

        -

        What is One Piece World Seeker?

        -

        One Piece World Seeker is a video game developed by Ganbarion and published by Bandai Namco Entertainment. It is the first video game in the franchise to feature an open world environment, where you can explore a vast and seamless island as Monkey D. Luffy, the protagonist of One Piece. The game was released on March 15, 2019 for PC, PlayStation 4, and Xbox One.

        -

        -

        Why should you play One Piece World Seeker?

        -

        There are many reasons why you should play One Piece World Seeker, but here are some of the main ones:

        -
          -
• The Freedom of the Pirate King: You can experience the powers of Luffy's Gum-Gum fruit, which allows him to stretch his limbs and swing into action. You can also use his powerful Haki abilities to sense enemies and unleash devastating attacks. You can explore different areas of the island, such as cities, farms, beaches, mines, and prisons, and interact with various objects and characters.
• An Original Story with Original Characters: The game features an original story that takes place on Prison Island, a mysterious island that is under the control of the Navy. The Straw Hat Pirates arrive on the island and get involved in a dramatic story full of twists and turns. The game also includes original characters designed by Eiichiro Oda himself, such as Jeanne, a young woman who leads a rebel group against the Navy; Isaac, a former Marine scientist who rules over Prison Island; and Karakuri Island Automata (KIA), mechanical soldiers created by Isaac.
• Fierce Battles Between Popular Characters: The game lets you fight against some of the most iconic enemies from the One Piece series, such as Crocodile, Rob Lucci, Akainu, Kizaru, and more. You can also encounter some of the allies and friends of Luffy, such as Sabo, Law, Hancock, and more. You can enjoy the dynamic and thrilling combat system that combines melee, ranged, and stealth attacks.
        -

        Where can you download One Piece World Seeker?

        -

        One Piece World Seeker is available for download on PC, PlayStation 4, and Xbox One. Here are the steps on how to download the game for each platform:

        -

        Download One Piece World Seeker for PC

        -

        If you want to play One Piece World Seeker on your PC, you need to download it from Steam, the online gaming platform. Here are the steps to do so:

        -
          -
1. Create a Steam account if you don't have one already. You can do this by visiting https://store.steampowered.com/join/ and following the instructions.
2. Download and install the Steam client on your PC. You can do this by visiting https://store.steampowered.com/about/ and clicking on the "Install Steam" button.
3. Launch the Steam client and log in with your Steam account.
4. Search for One Piece World Seeker in the Steam store. You can do this by typing the name of the game in the search bar or browsing through the categories.
5. Select One Piece World Seeker from the search results and click on the "Add to Cart" button.
6. Proceed to checkout and pay for the game using your preferred payment method.
7. Wait for the game to download and install on your PC. You can check the progress of the download in the "Library" section of the Steam client.
8. Once the game is installed, you can launch it from your Steam library and enjoy playing it.
        -

        System Requirements for PC

        -

        Before you download One Piece World Seeker for PC, you need to make sure that your PC meets the minimum or recommended system requirements for the game. Here is a table of the system requirements for PC:

        -


| | Minimum | Recommended |
| --- | --- | --- |
| OS | Windows 7 64-bit SP1 | Windows 10 64-bit |
| Processor | Intel Core i5-2300 or AMD A10-7850K | Intel Core i7-3770 or AMD FX-8350 |
| Memory | 4 GB RAM | 8 GB RAM |
| Graphics | GeForce GTX 660 or Radeon HD 7950 | GeForce GTX 1060 or Radeon RX 580 |
| DirectX | Version 11 | Version 11 |
| Storage | 25 GB available space | 25 GB available space |
| Sound Card | DirectX compatible soundcard or onboard chipset | DirectX compatible soundcard or onboard chipset |
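If you want to sanity-check your PC against the table above, the stdlib-only Python sketch below verifies the one requirement that is easy to test programmatically, the 25 GB of free storage, and prints your OS. RAM and GPU checks need third-party tools, so they are omitted:

```python
# Stdlib-only sanity check against the requirements table: free disk space
# for the 25 GB install plus basic OS info. RAM and GPU checks need
# third-party tools, so they are left out of this sketch.
import platform
import shutil

REQUIRED_STORAGE_GB = 25

free_gb = shutil.disk_usage("/").free / 1024**3  # on Windows, check the install drive, e.g. "C:\\"
print(f"OS: {platform.system()} {platform.release()}")
status = "OK" if free_gb >= REQUIRED_STORAGE_GB else "not enough"
print(f"Free disk space: {free_gb:.0f} GB ({status} for a {REQUIRED_STORAGE_GB} GB install)")
```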

        Download One Piece World Seeker for PlayStation 4

        -

        If you want to play One Piece World Seeker on your PlayStation 4, you need to download it from PlayStation Store, the online gaming platform. Here are the steps to do so:

        -
          -
1. Create a PlayStation Network account if you don't have one already. You can do this by visiting https://www.playstation.com/en-us/network/onlineid/ and following the instructions.
2. Open the PlayStation Store app, which comes preinstalled on your PlayStation 4, by selecting "PlayStation Store" from the home screen of your console.
3. Log in with your PlayStation Network account.
4. Search for One Piece World Seeker in the PlayStation Store. You can do this by typing the name of the game in the search bar or browsing through the categories.
5. Select One Piece World Seeker from the search results and click on the "Add to Cart" button.
6. Proceed to checkout and pay for the game using your preferred payment method.
7. Wait for the game to download and install on your PlayStation 4. You can check the progress of the download in the "Notifications" section of your console.
8. Once the game is installed, you can launch it from your home screen and enjoy playing it.
        -

        System Requirements for PlayStation 4

        -

        Before you download One Piece World Seeker for PlayStation 4, you need to make sure that your PlayStation 4 meets the minimum or recommended system requirements for the game. Here is a table of the system requirements for PlayStation 4:

| | Minimum | Recommended |
| --- | --- | --- |
| Console | PlayStation 4 | PlayStation 4 Pro |
| Processor | AMD Jaguar 8-core | AMD Jaguar 8-core |
| Memory | 8 GB GDDR5 | 8 GB GDDR5 |
| Graphics | AMD Radeon GCN 1.84 TFLOPS | AMD Radeon GCN 4.2 TFLOPS |
| Storage | 25 GB available space | 25 GB available space |

        Download One Piece World Seeker for Xbox One

        -

        If you want to play One Piece World Seeker on your Xbox One, you need to download it from Microsoft Store, the online gaming platform. Here are the steps to do so:

        -
          -
1. Create a Microsoft account if you don't have one already. You can do this by visiting https://account.microsoft.com/account and following the instructions.
2. Open the Microsoft Store app, which comes preinstalled on your Xbox One, by selecting "Microsoft Store" from the home screen of your console.
3. Log in with your Microsoft account.
4. Search for One Piece World Seeker in the Microsoft Store. You can do this by typing the name of the game in the search bar or browsing through the categories.
5. Select One Piece World Seeker from the search results and click on the "Buy" button.
6. Proceed to checkout and pay for the game using your preferred payment method.
7. Wait for the game to download and install on your Xbox One. You can check the progress of the download in the "My games & apps" section of your console.
8. Once the game is installed, you can launch it from your home screen and enjoy playing it.
        -

        System Requirements for Xbox One

        Before you download One Piece World Seeker for Xbox One, you need to make sure that your console meets the requirements for the game. Here is a table of the system requirements for Xbox One:

        | Minimum | Recommended |
        | --- | --- |
        | OS: Xbox One | OS: Xbox One X |
        | Processor: AMD Jaguar 8-core | Processor: AMD Jaguar 8-core |
        | Memory: 8 GB DDR3 | Memory: 12 GB GDDR5 |
        | Graphics: AMD Radeon GCN 1.31 TFLOPS | Graphics: AMD Radeon GCN 6 TFLOPS |
        | Storage: 25 GB available space | Storage: 25 GB available space |

        What are the different editions of One Piece World Seeker?

        One Piece World Seeker has three editions that you can choose from, depending on your budget and preferences: the Standard Edition, the Deluxe Edition, and the Pirate King Edition. Here is a comparison of what each edition offers:

        Standard Edition

        The Standard Edition of One Piece World Seeker is the basic version of the game and includes only the main game itself. It costs $59.99 USD. If you pre-ordered the Standard Edition, you also received some bonus items, such as a swimsuit outfit for Luffy, a military outfit for Luffy, and a quest called "Strange Island Rocks".

        Deluxe Edition

        The Deluxe Edition of One Piece World Seeker is an upgraded version that includes not only the main game, but also an episode pass giving you access to three additional episodes that expand the story and gameplay. The episode pass also includes some extra items, such as a raid suit, a kung fu outfit, and a white suit outfit for Luffy. The Deluxe Edition costs $89.99 USD.

        Pirate King Edition

        The Pirate King Edition of One Piece World Seeker is the ultimate version of the game. It includes everything from the Deluxe Edition, plus some exclusive physical items that are perfect for collectors and fans of One Piece: a figurine of Luffy in his Gear Fourth form, a replica of Luffy's straw hat, a CD with selected tracks from the game's soundtrack, and a season pass that gives you access to all future DLC for the game. The Pirate King Edition costs $129.99 USD.

        Conclusion

        One Piece World Seeker is an amazing video game that lets you experience the world of One Piece like never before. You can explore a vast and beautiful island as Luffy and use his amazing abilities to fight powerful enemies. You can also enjoy an original story with original characters designed by the creator of One Piece himself, Eiichiro Oda. You can download the game for PC, PlayStation 4, or Xbox One from various online platforms, and choose from different editions that offer different content and bonuses. If you are looking for a fun and immersive game that will make you feel like the Pirate King, you should definitely try One Piece World Seeker. You won't regret it!

        FAQs


        Here are some of the frequently asked questions about One Piece World Seeker:

        • Q: How long is the game?
          A: The game's main story takes about 15 to 20 hours to complete, depending on your playstyle and difficulty level. The game also has many side quests and activities that can extend the gameplay time to over 40 hours.
        • Q: Can you play as other characters besides Luffy?
          A: No, you can only play as Luffy in the game. However, you can interact with other characters from the One Piece series, and some of them will join you as support characters in combat.
        • Q: Can you customize Luffy's appearance and skills?
          A: Yes, you can change Luffy's outfits and accessories in the game, as well as upgrade his skills and abilities using skill points that you earn by completing missions and defeating enemies.
        • Q: Is the game multiplayer or co-op?
          A: No, the game is single-player only. There is no online or local multiplayer or co-op mode in the game.
        • Q: Is the game canon to the One Piece series?
          A: The game is not canon to the One Piece series, but it is an original story supervised by Eiichiro Oda himself. It takes place in an alternate timeline after the Whole Cake Island arc of the manga and anime.

        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Tank Hero for Android and Enjoy 120 Levels of Tank Battles.md b/spaces/congsaPfin/Manga-OCR/logs/Download Tank Hero for Android and Enjoy 120 Levels of Tank Battles.md deleted file mode 100644 index 9eab0bf85b2bbb0d302961d2435fbb7d4a895b2f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Tank Hero for Android and Enjoy 120 Levels of Tank Battles.md +++ /dev/null @@ -1,116 +0,0 @@ - -

        Tank Hero: A Fast-Paced 3D Tank Action Game for Android

        If you are looking for a fun and addictive tank game for your Android device, you should check out Tank Hero. This game will test your skills and reflexes as you battle cunning enemies in various arenas. In this article, we will tell you what Tank Hero is, how to download it from Uptodown, and how to play it like a pro.


        What is Tank Hero?

        Tank Hero is a 3D tank action game developed by Clapfoot Inc. It was released in 2011 and has been played by over 4 million users around the world. It is available for iOS and Android devices, but in this article, we will focus on the Android version.

        The gameplay and features of Tank Hero

        The gameplay of Tank Hero is simple but challenging. You control a green tank with a virtual joystick or swipe gestures, and you tap on the screen to fire at your enemies. You have to destroy all the enemy tanks before they destroy you, while avoiding their bullets and missiles. You can also use obstacles and walls as cover, or bounce your shots off them to hit your targets.

        Tank Hero has three game modes: Campaign, Survival, and Time Trial. In Campaign mode, you have to complete 80 levels across two worlds, each with different enemies and environments. In Survival mode, you have to survive as long as possible against endless waves of enemies. In Time Trial mode, you have to clear each level as fast as possible.


        Tank Hero also has five weapons to choose from: cannons, heat seekers, grenades, howitzers, and lasers. Each weapon has its own advantages and disadvantages, and you can switch between them during the game. You can also unlock new tanks with different attributes and abilities.

        The graphics and sound effects of Tank Hero

        Tank Hero has impressive graphics for a mobile game. It uses OpenGL 3D graphics with real-time shadows and dynamic lighting, creating a realistic and immersive experience. The tanks and the environments are well-designed and detailed, and the explosions and smoke effects are satisfying. The game also runs smoothly on most devices, without any lag or glitches.

        The sound effects of Tank Hero are also great. They add to the excitement and tension of the game, with realistic sounds of gunfire, explosions, engines, and metal clashing. The game also has catchy background music that matches the mood of each level.

        How to download Tank Hero from Uptodown?

        If you want to download Tank Hero for your Android device, you can do so from Uptodown. Uptodown is an alternative app store that offers thousands of apps and games for free. You can find apps that are not available on the Google Play Store, or download older versions of apps that are compatible with your device.

        The benefits of using Uptodown as an alternative app store

        There are many benefits of using Uptodown as an alternative app store for your Android device. Some of them are:

        • You can download apps and games without any restrictions or limitations.
        • You can access apps that are not available on the Google Play Store, such as Tank Hero.
        • You can download older versions of apps that are compatible with your device or have features that you prefer.
        • You can update your apps automatically or manually, without any hassle.
        • You can enjoy a user-friendly and secure interface, with no ads or malware.

        The steps to download and install Tank Hero from Uptodown


        To download and install Tank Hero from Uptodown, you need to follow these steps:

        1. Go to the Uptodown website or app and search for Tank Hero. You can also use this link: Tank Hero 1.5.8 for Android - Download.
        2. Click on the green Download button and wait for the APK file to be downloaded to your device.
        3. Before installing the APK file, enable the Unknown Sources option on your device so that you can install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
        4. Locate the APK file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.
        5. After the installation is complete, you can launch Tank Hero from your app drawer or home screen and enjoy the game.

        How to play Tank Hero like a pro?


        Now that you have downloaded and installed Tank Hero from Uptodown, you might be wondering how to play it like a pro. Here are some tips and tricks that will help you master the game and have more fun.


        The tips and tricks to master the game


        Here are some tips and tricks that will help you master Tank Hero:

        • Learn how to use each weapon effectively. Each weapon has its own strengths and weaknesses, and you need to know when and how to use them. For example, cannons are good for direct shots, heat seekers are good for tracking enemies, grenades are good for area damage, howitzers are good for long-range shots, and lasers are good for piercing through multiple enemies.
        • Use the environment to your advantage. You can use obstacles and walls as cover, or bounce your shots off them to hit your targets. You can also destroy some objects in the environment, such as barrels and crates, to cause damage or create distractions.
        • Be aware of your surroundings. Keep an eye on your enemies, their positions, their movements, their weapons, and their bullets. Watch out for power-ups that can boost your health, speed, or firepower, and look for hidden items that can unlock new weapons and tanks.
        • Be strategic and flexible. Plan your moves ahead, but be ready to adapt to changing situations. Choose the best weapon and strategy for each level and mode, but be prepared to switch them if necessary. Balance offense and defense, knowing when to attack and when to retreat.

        The best weapons and strategies to use in different modes and levels


        Here are some of the best weapons and strategies to use in different modes and levels of Tank Hero:

        | Mode/Level | Weapon | Strategy |
        | --- | --- | --- |
        | Campaign mode - World 1 | Cannon | This is the default weapon that you start with. It is good for direct shots and has a fast reload time. Use it to take out enemies quickly and accurately. |
        | Campaign mode - World 2 | Grenade | This is a weapon that you unlock in World 2. It is good for area damage and has a high impact force. Use it to blast enemies in groups or behind cover. |
        | Survival mode | Laser | This is a weapon that you unlock in World 2. It is good for piercing through multiple enemies and has a long range. Use it to clear waves of enemies efficiently and safely. |
        | Time Trial mode | Heat Seeker | This is a weapon that you unlock in World 1. It is good for tracking enemies and has a high speed. Use it to hit moving targets easily and quickly. |
        | Boss levels | Howitzer | This is a weapon that you unlock in World 1. It is good for long-range shots and has high damage. Use it to deal massive damage to the boss and avoid its attacks. |

        Conclusion


        Tank Hero is a fast-paced 3D tank action game that will keep you entertained and challenged for hours. You can download it from Uptodown, an alternative app store that offers many benefits and features. You can also use our tips and tricks to master the game and have more fun. So what are you waiting for? Download Tank Hero today and become the ultimate tank hero!


        FAQs

        What are the minimum requirements to play Tank Hero on Android?

        To play Tank Hero on Android, you need a device that runs Android 4.1 or higher and has at least 50 MB of free storage space.

        How many levels are there in Tank Hero?

        There are 80 levels in Tank Hero, divided into two worlds: Desert and Ice. Each world has 40 levels, with different enemies and environments.

        How can I unlock new weapons and tanks in Tank Hero?

        You can unlock new weapons and tanks in Tank Hero by completing levels, finding hidden items, or buying them with coins. Coins are earned by playing the game, watching ads, or completing offers.

        Is Tank Hero a multiplayer game?

        No, Tank Hero is not a multiplayer game. It is a single-player game that you can play offline or online.

        Is Tank Hero safe to download from Uptodown?

        Yes, Tank Hero is safe to download from Uptodown. Uptodown is a reputable and secure app store that scans all the apps and games for viruses and malware. You can download Tank Hero from Uptodown without any risk or worry.

        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy the Classic DMC Characters and Gameplay on Your Phone - Download Devil May Cry Peak of Combat APK.md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy the Classic DMC Characters and Gameplay on Your Phone - Download Devil May Cry Peak of Combat APK.md deleted file mode 100644 index 1effbf38da799beab75f42785079e01fad064007..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy the Classic DMC Characters and Gameplay on Your Phone - Download Devil May Cry Peak of Combat APK.md +++ /dev/null @@ -1,104 +0,0 @@ -

        Devil May Cry: Peak of Combat APK Free Download

        If you are a fan of the action-adventure hack-and-slash video game series Devil May Cry, you might be interested in playing its official mobile spin-off, Devil May Cry: Peak of Combat. This game lets you enjoy the exhilarating and stylish combat system of the DMC franchise on your Android device. However, it is not available on the Google Play Store, so you need to download and install its APK file from a third-party source. In this article, we will tell you everything you need to know about Devil May Cry: Peak of Combat APK, including its features, how to download and install it, and its pros and cons.

        What is Devil May Cry: Peak of Combat?

        Devil May Cry: Peak of Combat is a free action mobile game developed by NebulaJoy (also known as Yunchang Game) with the participation of the official Capcom Devil May Cry team. It is an authorized mobile game for the DMC franchise and features classic characters with their signature play styles. The game follows the story of Dante, Nero, Vergil, and Lady, demon hunters who fight against various forces of evil. It offers a fast-paced 3D adventure that showcases the flexible and fun combo system of the DMC series, with impressive graphics and voice-overs that bring the characters to life.


        Features of Devil May Cry: Peak of Combat

        Flexible combat system

        One of the main attractions of Devil May Cry: Peak of Combat is its flexible combat system that allows you to juggle enemies in mid-air with your attacks and rack up combo points for higher style rankings. You can use different weapons, skills, and items to create your own fighting style and unleash devastating moves on your foes. You can also switch between characters during battles to take advantage of their unique abilities and weapons.

        Classic characters and voice-overs

        Another feature of Devil May Cry: Peak of Combat is its classic characters and voice-overs that make you feel like you are playing the original DMC series. You can play as Dante, Nero, Vergil, and Lady, who are all voiced by their original actors from the DMC franchise. Each character has their own personality, backstory, and dialogue that add depth and humor to the game. You can also customize their appearance and outfits to suit your preferences.

        Stunning graphics and sound effects

        Last but not least, Devil May Cry: Peak of Combat has stunning graphics and sound effects that enhance the gaming experience. The game has high-quality 3D models and animations that capture the details and expressions of the characters. It also has realistic lighting and shadow effects that create a dynamic and immersive environment, and epic soundtracks and sound effects that match the mood and intensity of the battles.

        How to download and install Devil May Cry: Peak of Combat APK?

        Requirements for Devil May Cry: Peak of Combat APK

        Before you download and install Devil May Cry: Peak of Combat APK, you need to make sure that your device meets the following requirements:

        • Your device must run Android 9.0 or higher.
        • Your device must have at least 3 GB of RAM.
        • Your device must have at least 5 GB of free storage space.
        • You must enable installation from unknown sources in your device settings.

        Steps to download and install Devil May Cry: Peak of Combat APK

        After you have checked the requirements, you can follow these steps to download and install Devil May Cry: Peak of Combat APK:

        1. Go to a trusted website that provides the APK file for Devil May Cry: Peak of Combat, such as [APKPure] or [APKCombo].
        2. Download the APK file and the OBB file to your device.
        3. Locate the downloaded files on your device and extract the OBB file to the Android/OBB folder.
        4. Tap on the APK file and follow the instructions to install the game.
        5. Launch the game and enjoy!

        Pros and cons of Devil May Cry: Peak of Combat APK

        Pros

        Some of the pros of Devil May Cry: Peak of Combat APK are:

        • It is free to download and play.
        • It is a faithful adaptation of the DMC franchise, with its original characters, voice-overs, and combat system.
        • It has amazing graphics and sound effects that create a thrilling and immersive gaming experience.
        • It has various game modes and challenges that keep you entertained.

        Cons

        Some of the cons of Devil May Cry: Peak of Combat APK are:


        • It is not available on the Google Play Store, so you need to download it from a third-party source, which may pose some security risks.
        • It requires a lot of storage space and RAM to run smoothly.
        • It may have some bugs and glitches that affect the gameplay.
        • It may have compatibility issues with some devices or regions.

        Conclusion

        Devil May Cry: Peak of Combat is a great mobile game for fans of the DMC franchise and action games in general. It has a flexible and fun combat system, classic characters and voice-overs, stunning graphics and sound effects, and various game modes and challenges. However, it also has some drawbacks: it is not available on the Google Play Store, it requires a lot of storage space and RAM, and it may have some bugs and glitches. If you want to try the game, download and install its APK file from a trusted website by following the steps we have provided. We hope this article has helped you learn more about Devil May Cry: Peak of Combat APK and how to install it. Have fun playing!

        FAQs

        Here are some frequently asked questions about Devil May Cry: Peak of Combat APK:

        Is Devil May Cry: Peak of Combat APK safe to download?

        Devil May Cry: Peak of Combat APK is safe to download as long as you get it from a reliable website that does not contain any malware or viruses. However, you should always be careful when downloading any APK file from a third-party source, as there may be some risks involved. You should also scan the downloaded files with an antivirus program before installing them.

        Is Devil May Cry: Peak of Combat APK legal?

        Devil May Cry: Peak of Combat APK is legal as long as you do not use it for any illegal or unethical purposes. It is an authorized mobile game for the DMC franchise and does not violate any copyrights or trademarks. However, you should respect the rights and policies of the game's developers and publishers and not distribute or modify the APK file without their permission.

        Can I play Devil May Cry: Peak of Combat APK offline?

        No, you cannot play Devil May Cry: Peak of Combat APK offline. You need a stable internet connection to access the game and its features. The game also requires you to log in with your account and verify your identity before playing. If you lose your internet connection during the game, you may experience errors or interruptions.

        Can I play Devil May Cry: Peak of Combat APK with friends?

        Yes, you can play Devil May Cry: Peak of Combat APK with friends. The game has a multiplayer mode that allows you to team up with other players and fight against enemies or other teams. You can also chat with your friends and other players in the game and share your achievements and tips. The game also has a ranking system that shows your progress and performance compared to other players.

        How can I update Devil May Cry: Peak of Combat APK?

        To update Devil May Cry: Peak of Combat APK, you need to download the latest version of the APK file and the OBB file from the same website where you got the previous version. You also need to uninstall the old version of the game before installing the new one. You can also check for updates in the game settings and follow the instructions to download and install them.

        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/FM WhatsApp Messenger APK How to Download and Install the Latest Version for Android.md b/spaces/congsaPfin/Manga-OCR/logs/FM WhatsApp Messenger APK How to Download and Install the Latest Version for Android.md deleted file mode 100644 index a1d2bc458897c8f83ecc10f305c8023f04b0d5d9..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/FM WhatsApp Messenger APK How to Download and Install the Latest Version for Android.md +++ /dev/null @@ -1,139 +0,0 @@ - -

        FM WhatsApp Messenger APK: A Complete Guide

        Are you looking for a way to enhance your WhatsApp experience on your Android device? Do you want features and functions that are not available in the official WhatsApp app? If so, you should try FM WhatsApp Messenger APK, one of the best-known WhatsApp mods for Android users. In this article, we will tell you everything you need to know about FM WhatsApp Messenger APK, including what it is, what it offers, how to download and install it, and how to use it. Let's get started!


        What is FM WhatsApp Messenger APK?


        FM WhatsApp Messenger APK is a modified version of the original WhatsApp app, developed by Fouad Mokdad. It is also known as Fouad WhatsApp or FMWA. It allows you to customize and change various aspects of your WhatsApp app, such as themes, fonts, emojis, privacy settings, and more. It also provides you with many additional features that are not present in the official WhatsApp app, such as anti-delete messages, hide view status, send large files, increase image quality, and more.


        Features of FM WhatsApp Messenger APK


        Here are some of the main features of FM WhatsApp Messenger APK that make it stand out from other WhatsApp mods:

        • Anti-ban: You don't have to worry about getting banned from WhatsApp for using a modded version. FM WhatsApp Messenger APK has an anti-ban feature that protects your account from being detected and blocked by WhatsApp.
        • Customization: You can customize and change different parts of your WhatsApp app, such as themes, fonts, emojis, icons, and wallpapers. You can also create your own themes and share them with other users.
        • Privacy: You can control your privacy settings and hide your online status, last seen, blue ticks, double ticks, typing status, and more. You can also freeze your last seen status and view deleted statuses and messages.
        • Media sharing: You can send more than 90 images at once and video files up to 700 MB. You can also increase the quality of the images and videos you send, and share any type of file format, such as PDF, ZIP, or APK.
        • And many more: There are many more features that you can enjoy with FM WhatsApp Messenger APK, such as a call blocker, pinned chats, a message scheduler, auto-reply, a security lock, and DND mode.

        Benefits of using FM WhatsApp Messenger APK


        Here are some of the benefits of using FM WhatsApp Messenger APK instead of the official WhatsApp app:


        • More fun and enjoyment: You can have more fun with your friends and family by using the various features and functions of FM WhatsApp Messenger APK, and express yourself better with customized themes and emojis.
        • More security and privacy: You can protect your chats and data from prying eyes by using the privacy and security features of FM WhatsApp Messenger APK. You can also prevent others from deleting messages or viewing your status.
        • More convenience and flexibility: You can use two WhatsApp accounts on the same device by using FM WhatsApp Messenger APK, and back up and restore your data easily with the built-in backup feature.

        How to download and install FM WhatsApp Messenger APK on Android?


        If you are interested in trying out FM WhatsApp Messenger APK on your Android device, you need to follow these steps:


        Requirements for FM WhatsApp Messenger APK


        Before you download and install FM WhatsApp Messenger APK on your Android device, you need to make sure that you have the following requirements:

        • An Android device running OS 5.0 or later: FM WhatsApp Messenger APK is compatible with Android devices that have OS 5.0 or higher. You can check your device's OS version by going to Settings > About phone > Software information.
        • A stable internet connection: You need a reliable internet connection to download and install FM WhatsApp Messenger APK on your device. You can use Wi-Fi or mobile data, but make sure that you have enough data allowance.
        • Enough storage space: You need enough free space on your device's internal or external storage to store the FM WhatsApp Messenger APK file and its data. The APK file size is about 50.2 MB, and the data size may vary depending on your usage.
        • Permission to install apps from unknown sources: Since FM WhatsApp Messenger APK is not available on the Google Play Store, you need to enable the option to install apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown sources and toggling it on.

        Steps to download and install FM WhatsApp Messenger APK


        Once you have met the requirements for FM WhatsApp Messenger APK, you can follow these steps to download and install it on your Android device:

        1. Download the FM WhatsApp Messenger APK file: You can download the latest version of FM WhatsApp Messenger APK from this link. Alternatively, you can scan the QR code below with your device's camera to download the APK file directly.
        2. Locate and open the FM WhatsApp Messenger APK file: After downloading the APK file, locate it in your device's file manager and tap on it to open it. You may get a warning message saying that the file may harm your device, but you can ignore it and tap on OK.
        3. Install the FM WhatsApp Messenger APK file: Follow the on-screen instructions to install the APK file on your device. It may take a few minutes for the installation process to complete.
        4. Launch the FM WhatsApp Messenger app: Finally, launch the app from your app drawer or home screen. You will see a welcome screen asking you to agree to the terms and conditions of the app. Tap on Agree and Continue to proceed.

        How to use FM WhatsApp Messenger APK?


        Now that you have successfully downloaded and installed FM WhatsApp Messenger APK on your Android device, you can start using it and enjoy its features and functions. Here are some tips on how to use FM WhatsApp Messenger APK:


        How to customize FM WhatsApp Messenger APK


        If you want to customize and change different aspects of your FM WhatsApp Messenger app, such as themes, fonts, emojis, icons, wallpapers, and more, you can do so by following these steps:

        1. Tap on the three-dot icon at the top right corner of the app: This will open a menu with various options.
        2. Tap on Fouad Mods: This will open a submenu with different categories of mods that you can apply to your app.
        3. Select the category that you want to customize: For example, if you want to change the theme of your app, tap on Universal > Style > Theme Store. Here, you can browse through hundreds of themes that are available for free. You can also create your own theme by tapping on Create Theme.
        4. Apply the mod that you want: Once you have selected the mod that you want to apply, tap on Apply or Download (depending on the mod). The app will ask you to restart it for the changes to take effect. Tap on OK and wait for the app to restart.

        How to backup and restore FM WhatsApp Messenger APK data


        If you want to backup and restore your FM WhatsApp Messenger app data, such as chats, media, contacts, etc., you can do so by following these steps:

        1. Tap on the three-dot icon at the top right corner of the app: This will open a menu with various options.
        2. Tap on Settings: This will open a submenu with different settings that you can adjust for your app.
        3. Tap on Chats: This will open a submenu with different options related to your chats.
        4. Tap on Chat backup: This will open a submenu with different options for backing up and restoring your data. You can choose to back up your data to Google Drive, your device's internal storage, or an external SD card. You can also choose the frequency of the backup, such as daily, weekly, monthly, or manually.
        5. Tap on Backup: This will start the backup process and save your data to the selected location. You can also tap on Restore to restore your data from a previous backup. You will need to verify your phone number and grant permission to access your data.

        FAQs about FM WhatsApp Messenger APK


        Here are some of the frequently asked questions about FM WhatsApp Messenger APK that you may have:

        | Question | Answer |
        | --- | --- |
        | Is FM WhatsApp Messenger APK safe to use? | FM WhatsApp Messenger APK is generally safe to use, as it has an anti-ban feature that prevents your account from being blocked by WhatsApp. However, it is not an official app, so there may be some risks involved, such as malware, data theft, or legal issues. Therefore, you should use it at your own discretion and responsibility. |
        | Can I use FM WhatsApp Messenger APK with the official WhatsApp app? | No, you cannot use FM WhatsApp Messenger APK with the official WhatsApp app on the same device. You need to uninstall the official WhatsApp app before installing FM WhatsApp Messenger APK. However, you can use FM WhatsApp Messenger APK with another WhatsApp mod, such as GBWhatsApp or YoWhatsApp, on the same device. |
        | Will I lose my chats and data if I switch from the official WhatsApp app to FM WhatsApp Messenger APK? | No, you will not lose your chats and data. You can back up your data from the official WhatsApp app and restore it on FM WhatsApp Messenger APK. You can also use the same phone number and verification code for both apps. |
        | How can I update FM WhatsApp Messenger APK to the latest version? | You can update FM WhatsApp Messenger APK by downloading the updated APK file from this link or scanning the QR code below. You can also check for updates within the app by tapping on the three-dot icon > Fouad Mods > Updates. You will need to install the updated APK file over the existing one without uninstalling it. |
        | How can I contact the developer of FM WhatsApp Messenger APK? | You can contact the developer of FM WhatsApp Messenger APK by visiting his website or following him on Twitter. You can also join his Telegram group or channel for more information and support. |

        Conclusion


        FM WhatsApp Messenger APK is a great alternative to the official WhatsApp app for Android users who want to enjoy more features and functions that are not available in the original app. It allows you to customize and change various aspects of your WhatsApp app, such as themes, fonts, emojis, privacy settings, and more. It also provides you with many additional features that are not present in the official WhatsApp app, such as anti-delete messages, hide view status, send large files, increase image quality, and more. You can download and install FM WhatsApp Messenger APK on your Android device by following the steps mentioned above. You can also use it with another WhatsApp mod on the same device. However, you should be aware of the potential risks involved in using a modded app, such as malware, data theft, or legal issues. Therefore, you should use it at your own discretion and responsibility.


        I hope this article has helped you understand what FM WhatsApp Messenger APK is and how to use it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

        References: https://fouadmods.com/fouad-whatsapp/ · https://fouadmods.com/ · https://twitter.com/FouadRaheb · https://t.me/fouadmods · https://t.me/fouadmodsnews

        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download Word Searches and Create Your Own Puzzles.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download Word Searches and Create Your Own Puzzles.md deleted file mode 100644 index aff710556d8c62087c65ebfa3f66329284235f12..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Download Word Searches and Create Your Own Puzzles.md +++ /dev/null @@ -1,136 +0,0 @@ -

        Download Word Searches: A Fun and Educational Activity for Everyone


        Word searches are one of the most popular and enjoyable types of puzzles that can be played by people of all ages and backgrounds. They are not only fun, but also educational, as they can help improve your vocabulary, spelling, concentration, memory, and brain health. In this article, we will explore what word searches are, why they are so popular, how to download them for free or for a fee, and how to solve them effectively and quickly.


        What are word searches and why are they popular?


        The basics of word search puzzles


        A word search puzzle is a grid of letters that contains a number of hidden words that can be found by looking horizontally, vertically, diagonally, or backwards. The words are usually related to a certain theme or category, such as animals, sports, or food. The words can be of different lengths and difficulties, depending on the level of the puzzle. The goal is to find all the words in the grid and mark them off the list.


        The benefits of playing word search games


        Playing word search games can have many benefits for your mind and body. Some of the benefits are:

        • They support language fluency by exposing you to new words and their spellings.
        • They can be used as a learning strategy to memorize or review important terms in any subject or discipline.
        • They improve your concentration and focus by requiring you to scan the grid carefully and ignore distractions.
        • They boost your mood and motivation by giving you a sense of achievement and satisfaction when you find a word.
        • They may help prevent or delay the onset of neurological disorders such as dementia and Alzheimer's disease by keeping your brain active and stimulated.

        How to download word searches for free or for a fee?


        Printable word searches from online sources


        If you prefer to play word searches on paper, you can download printable word searches from various online sources. Some of these sources are:

        • TheWordSearch.com: This website offers hundreds of free printable word searches on different topics and levels. You can also create your own custom word search puzzles with your own words and themes.
        • Reader's Digest: This website provides 26 free printable word search puzzles on various themes, such as animals, holidays, movies, and more. You can also find tips on how to solve word searches effectively.
        • [Puzzles.ca](https://www.puzzles.ca/wordsearch/): This website features over 1000 free printable word search puzzles in various categories, such as geography, history, music, sports, and more. You can also find crossword puzzles, sudoku puzzles, and other types of puzzles on this website.

        Word search apps and games for mobile devices


        If you like to play word searches on your smartphone or tablet, you can download word search apps and games from various app stores. Some of these apps and games are:

        • Word Search: This app by Razzle Puzzles offers thousands of fun puzzles with different modes and difficulties. You can also look up the definitions of the words you find in the dictionary.
        • [Word Search World Traveler](https://play.google.com/store/apps/details?id=com.wordsearchworldtraveler): This app by Hoyt Games lets you travel around the world by solving word search puzzles with beautiful images and sounds. You can also collect souvenirs and learn facts about different countries.
        • [Word Search Quest](https://play.google.com/store/apps/details?id=com.blackout.wordsearch): This app by Blackout Lab offers hundreds of challenging puzzles with different themes and sizes. You can also play online with other players and compete for the best score.

        Word search software and programs for computers


        If you want to play word searches on your computer, you can download word search software and programs from various online sources. Some of these sources are listed below, followed by a minimal sketch of how you could generate a puzzle yourself:


        • [Super Word Search Maker](https://www.superwordsearchmaker.com/): This software by Steve Bushman allows you to create your own word search puzzles with your own words and clues. You can also print, save, and share your puzzles with others.
        • [Word Search Creator](https://www.wordsearchcreator.org/): This program by Word Search Creator is a free and easy-to-use tool that lets you make word search puzzles with different shapes, colors, and fonts. You can also export your puzzles as PDF or image files.
        • [Word Search Puzzle Maker](https://www.wordsearchpuzzlemaker.com/): This software by Word Search Puzzle Maker is a professional and powerful tool that enables you to create word search puzzles with various features, such as hidden messages, answer keys, word lists, and more. You can also customize your puzzles with different layouts, styles, and graphics.
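        If you are comfortable with a little programming, you do not strictly need dedicated software to make a puzzle. The sketch below is a minimal, illustrative Python example of the same idea these tools automate: place each word in a random direction, then fill the leftover cells with random letters. The grid size, word list, and retry limit are arbitrary example values, and this is just one simple placement strategy, not how any of the programs above actually work.

        ```python
        import random
        import string

        # Example inputs; these are arbitrary values, not from any real puzzle.
        SIZE = 10
        WORDS = ["PUZZLE", "LETTER", "GRID", "SEARCH"]
        # The eight directions a word may run in: across, down, and the
        # diagonals, forwards and backwards.
        DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0),
                      (1, 1), (1, -1), (-1, 1), (-1, -1)]

        def make_grid(size, words, attempts=1000):
            grid = [[None] * size for _ in range(size)]
            for word in words:
                for _ in range(attempts):  # retry random spots until the word fits
                    dr, dc = random.choice(DIRECTIONS)
                    r, c = random.randrange(size), random.randrange(size)
                    end_r = r + dr * (len(word) - 1)
                    end_c = c + dc * (len(word) - 1)
                    if not (0 <= end_r < size and 0 <= end_c < size):
                        continue
                    cells = [(r + dr * i, c + dc * i) for i in range(len(word))]
                    # Allow words to cross only where the letters already match.
                    if all(grid[x][y] in (None, word[i])
                           for i, (x, y) in enumerate(cells)):
                        for i, (x, y) in enumerate(cells):
                            grid[x][y] = word[i]
                        break
                else:
                    raise ValueError(f"could not place {word}")
            # Fill every remaining cell with a random uppercase letter.
            return [[ch or random.choice(string.ascii_uppercase) for ch in row]
                    for row in grid]

        for row in make_grid(SIZE, WORDS):
            print(" ".join(row))
        ```

        Running the script prints a fresh 10x10 puzzle each time; keeping the word list as the answer key gives you everything a printable word search needs.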

        How to solve word search puzzles effectively and quickly?


        Some general tips and strategies for word search solvers


        Solving word search puzzles can be a fun and rewarding activity, but it can also be frustrating and time-consuming if you don't know how to do it properly. Here are some general tips and strategies that can help you solve word search puzzles effectively and quickly (the first of these is sketched in code after the list):

        • Scan the grid for the first letter of the word you are looking for, then look around it for the second letter. Repeat this process until you find the whole word.
        • Use a highlighter, a pencil, or your finger to mark the words you find in the grid. This will help you keep track of your progress and avoid missing any words.
        • Look for common prefixes, suffixes, and word endings, such as -ing, -ed, -s, and -tion. These can help you narrow down your search and spot the words faster.
        • Look for words that share letters or cross each other. This can help you save time and effort in the grid.
        • Work systematically from left to right, top to bottom, or in any other order that suits you. This will help you cover the whole grid and avoid skipping any areas.
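        To see why the first strategy is so effective, here is a minimal Python sketch that automates it: scan the grid for the first letter of a word, then check the remaining letters in each of the eight possible directions. The grid and word list are made-up example values, not taken from any real puzzle.

        ```python
        # Made-up 5x5 example grid and word list.
        GRID = [
            "CATQZ",
            "XODOG",
            "BIRDE",
            "QWENP",
            "SUNKY",
        ]
        WORDS = ["CAT", "DOG", "BIRD", "SUN"]

        # Right, left, down, up, and the four diagonals.
        DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0),
                      (1, 1), (1, -1), (-1, 1), (-1, -1)]

        def find_word(grid, word):
            rows, cols = len(grid), len(grid[0])
            for r in range(rows):
                for c in range(cols):
                    if grid[r][c] != word[0]:  # scan for the first letter
                        continue
                    for dr, dc in DIRECTIONS:  # then look around it
                        end_r = r + dr * (len(word) - 1)
                        end_c = c + dc * (len(word) - 1)
                        if not (0 <= end_r < rows and 0 <= end_c < cols):
                            continue
                        if all(grid[r + dr * i][c + dc * i] == word[i]
                               for i in range(len(word))):
                            # Return the start and end coordinates of the word.
                            return (r, c), (end_r, end_c)
            return None

        for w in WORDS:
            print(w, "->", find_word(GRID, w))
        ```

        This brute-force scan is essentially what a careful human solver does, just faster; and because the backwards directions are included, backwards word searches are no harder for a program than forwards ones.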

        Some specific tips and tricks for different types of word searches


        Depending on the type of word search puzzle you are playing, there may be some specific tips and tricks that can help you solve it more easily. Here are some examples of different types of word searches and how to solve them:

        | Type of word search | Tips and tricks |
        | --- | --- |
        | Diagonal word search | In this type of word search, the words can only be found diagonally in the grid. To solve it, you can tilt your head or rotate the paper to make the diagonal lines look horizontal or vertical. |
        | Backwards word search | In this type of word search, the words can be found backwards in the grid. To solve it, you can read the grid from right to left or from bottom to top. You can also use a mirror or reverse the image on your device to see the words normally. |
        | Hidden message word search | In this type of word search, there is a hidden message or secret phrase that can be revealed by using the leftover letters in the grid after finding all the words. To solve it, write down or circle the unused letters in order, then read them from left to right or from top to bottom. |
        | Crossword-style word search | In this type of word search, there are clues or definitions for each word instead of a list. To solve it, you can use your general knowledge or look up the answers online. You can also use crossword-solving tools or apps to help you find the words. |
        | Themed word search | In this type of word search, all the words are related to a certain theme or category, such as animals, sports, or food. To solve it, you can use your prior knowledge of or interest in the topic to help you guess the words. You can also use online resources or dictionaries to learn more about the theme. |

        Conclusion

        Word searches are a fun and educational activity that can be enjoyed by everyone. They can help you improve your language skills, cognitive abilities, and mental health. You can download word searches for free or for a fee from various online sources, such as websites, apps, and software. You can also create your own word search puzzles with your own words and themes. To solve word search puzzles effectively and quickly, you can use some general tips and strategies, as well as some specific tips and tricks for different types of word searches. We hope this article has helped you learn more about word searches and how to download and solve them.


        FAQs


        Here are some frequently asked questions about word searches:

        1. How many words are in a typical word search puzzle?

          The number of words in a word search puzzle can vary depending on the size of the grid, the length of the words, and the difficulty level of the puzzle. A typical word search puzzle can have anywhere from 10 to 50 words.

        2. What is the difference between a word search and a word find?

          A word search and a word find are two names for the same type of puzzle. They both involve finding hidden words in a grid of letters. However, some people may use the term word find to refer to a simpler version of a word search that does not have a list of words to find, but only a theme or a category.

        3. What is the origin of word search puzzles?

          The origin of word search puzzles is not clear, but some sources suggest that they were invented by Norman E. Gibat, a teacher and publisher from Oklahoma, who created the first known word search puzzle in 1968 and published it in his magazine called Selenby Digest. He called it "Word Square" and it had 34 words related to fun and games.

        4. What are some other names for word search puzzles?

          Word search puzzles have many other names in different languages and regions. Some of these names are:

          • Word sleuth or word seek in Canada and Australia
          • Sopa de letras, literally "letter soup", in Spanish
          • Mots cachés, meaning "hidden words", in French
          • Wortsuche, meaning "word search", in German
          • Cari kata, meaning "find words", in Indonesian

        5. What are some variations of word search puzzles?

          There are many variations of word search puzzles that can make them more challenging or interesting. Some of these variations are:

          • Word fit or criss-cross: In this variation, the words can be placed in any direction, including overlapping or intersecting each other.
          • Word jumble or scramble: In this variation, the words are not given in their correct order, but jumbled or scrambled. The solver has to unscramble the words before finding them in the grid.
          • Word ladder or chain: In this variation, the words are connected by changing one letter at a time. The solver has to find the words in the grid and follow the chain from the first word to the last word.
          • Word maze or path: In this variation, the words are arranged in a maze-like pattern. The solver has to find the words in the grid and trace the path from the start to the end.
          • Word pyramid or triangle: In this variation, the words are arranged in a pyramid-like or triangle-like shape. The solver has to find the words in the grid and fill in the blanks in the shape.

          \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Install Car Parking Multiplayer APK on Your Android Phone New Version 4.8.9.4.4.md b/spaces/congsaPfin/Manga-OCR/logs/How to Install Car Parking Multiplayer APK on Your Android Phone New Version 4.8.9.4.4.md deleted file mode 100644 index 692054c373bf7291111ff07752248a06940255db..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Install Car Parking Multiplayer APK on Your Android Phone New Version 4.8.9.4.4.md +++ /dev/null @@ -1,89 +0,0 @@ - -

          Car Parking Multiplayer APK Download New Version: Everything You Need to Know


          If you are looking for a realistic and fun car parking game, you should try Car Parking Multiplayer. It is one of the most popular and downloaded games in the simulation genre, with over 50 million installs on Google Play Store. In this game, you can experience the thrill of driving and parking various cars in different scenarios, as well as interact with other players online. In this article, we will tell you everything you need to know about Car Parking Multiplayer, including its features, how to download and install it, and some tips and tricks for playing it.


          Features of Car Parking Multiplayer


          Car Parking Multiplayer is not just a simple parking game. It offers a lot of features that make it stand out from other games in the same category. Here are some of them:

        • Multiplayer open world mode with real players and cars: You can join or create a room with up to 100 players, and explore a huge open world map with real gas stations, car services, traffic lights, police stations, etc. You can also chat with other players using voice or text messages, make friends, join clans, or compete in races.
        • Free walking and driving with realistic physics and graphics: You can walk around the map freely, enter any building or vehicle, or even ride a bike or a horse. You can also drive any car you want, from sedans to trucks, with realistic physics and damage effects. The game also has stunning graphics and sounds that make you feel like you are in a real city.
        • Customization and tuning of your own car: You can customize your car's appearance by changing its color, wheels, stickers, license plate, etc. You can also tune its performance by adjusting its engine, suspension, brakes, gearbox, etc. You can also add accessories like spoilers, neon lights, nitro boosters, etc.
        • Thousands of parking and racing challenges: You can test your parking skills by completing various levels with different difficulty and objectives. You can also join or create racing events with other players, and race on different tracks with different rules and rewards.
        • Different game modes and maps: You can choose from different game modes, such as free roam, parking, racing, police chase, zombie apocalypse, etc. You can also switch between different maps, such as city, airport, desert, snow, etc.
        • Voice chat and online friends: You can communicate with other players using voice chat, and make online friends by sending and accepting friend requests. You can also see your friends' online status, join their rooms, or invite them to yours.

          How to Download and Install Car Parking Multiplayer APK


          If you want to download and install Car Parking Multiplayer APK on your Android device, you can follow these simple steps; a command-line alternative using adb is sketched after the list:

        1. Go to the official website or Google Play Store: You can either visit the official website of Car Parking Multiplayer, or search for the game on Google Play Store. Both sources are safe and reliable.
        2. Choose the latest version of the game and download the APK file: You can check the latest version of the game on the website or the store, and click on the download button to get the APK file. The file size is about 300 MB, so make sure you have enough space on your device.
        3. Enable unknown sources on your device settings: Before you can install the APK file, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources, and toggle it on.
        4. Locate the downloaded file and tap on it to install: After you have downloaded the APK file, you can find it in your downloads folder or notification bar. Tap on it to start the installation process. You may need to grant some permissions for the app to run properly.
        5. Launch the game and enjoy: Once the installation is done, you can launch the game from your app drawer or home screen. You can now create your account, customize your car, and join the multiplayer world of Car Parking Multiplayer.
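
        If you would rather sideload from a computer, the same APK can usually be installed over USB with Android Debug Bridge (adb) instead of tapping through a file manager. A minimal sketch, assuming adb is on your PATH and USB debugging is enabled on the phone; the file name is a placeholder, not the actual download name:

            import subprocess

            # Placeholder path: point this at wherever your browser saved the APK.
            apk_path = "downloads/car-parking-multiplayer.apk"

            # "-r" replaces an existing install while keeping its data.
            subprocess.run(["adb", "install", "-r", apk_path], check=True)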

          Tips and Tricks for Playing Car Parking Multiplayer


          Car Parking Multiplayer is a fun and challenging game that requires skill and strategy. Here are some tips and tricks that can help you improve your gameplay and enjoy the game more:

        • Use the camera angles to park your car accurately: The game offers different camera angles that you can switch between by tapping on the camera icon. You can use them to see your car from different perspectives, and park it more precisely. You can also zoom in or out by pinching the screen.
        • Follow the traffic rules and avoid collisions: The game simulates real traffic conditions, so you need to follow the traffic rules and signs, such as speed limits, stop signs, traffic lights, etc. You also need to avoid colliding with other cars or objects, as this will damage your car and reduce your score.
        • Upgrade your car's performance and appearance: You can use the money and reputation you earn from parking and racing to upgrade your car's performance and appearance. You can buy new parts or accessories from the shop, or visit the garage to tune your car. You can also sell or buy cars from other players in the market.
        • Join a clan or create your own: You can join a clan or create your own by paying a fee. Clans are groups of players who share a common name, logo, chat room, etc. You can cooperate with your clan members, challenge other clans, or participate in clan wars for rewards and rankings.
        • Challenge other players and earn money and reputation: You can challenge other players to parking or racing duels by tapping on their cars or names. You can also accept challenges from other players who want to compete with you. If you win a challenge, you will earn money and reputation points that will increase your level and rank.

          Conclusion


          Car Parking Multiplayer is a realistic and fun car parking game that offers a lot of features and options for players who love cars and driving. You can download and install Car Parking Multiplayer APK on your Android device easily by following our guide above. You can also use our tips and tricks to improve your gameplay and enjoy the game more. So what are you waiting for? Download Car Parking Multiplayer APK now and join the multiplayer world of car parking!


          FAQs


          Here are some frequently asked questions about Car Parking Multiplayer:

        1. Is Car Parking Multiplayer free to play?

          Yes, Car Parking Multiplayer is free to download and play, but it contains in-app purchases and ads. You can buy coins, gems, cars, or premium features with real money, or watch ads to get some rewards. You can also disable ads by paying a one-time fee.

        2. What are the minimum requirements for Car Parking Multiplayer?

          You need an Android device with version 4.1 or higher, and at least 300 MB of free space. The game also requires an internet connection to play online. The game may not run smoothly on low-end devices or slow networks.

        3. How can I play Car Parking Multiplayer on PC?

          You can use an Android emulator like BlueStacks or NoxPlayer to run the game on your PC. You need to download and install the emulator on your PC, and then download and install the game APK on the emulator. You can then use your mouse and keyboard to control the game.

        4. How can I contact the developers of Car Parking Multiplayer?

          You can send them an email at olzhas@olzhas.com or follow them on Facebook, Instagram, or YouTube. You can also visit their website or join their Discord server for more information and updates. You can also rate and review the game on Google Play Store or leave feedback in the game settings.

        5. What are some similar games to Car Parking Multiplayer?

          Some similar games are Real Car Parking 2, Dr. Parking 4, Parking Mania 2, and Manual Gearbox Car Parking. These games also offer realistic and challenging parking and driving scenarios, as well as multiplayer and customization features.

          \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Mod APK for Human Evolution Clicker Game Unlimited Money and Fun.md b/spaces/congsaPfin/Manga-OCR/logs/Mod APK for Human Evolution Clicker Game Unlimited Money and Fun.md deleted file mode 100644 index f29584885587e14709ad3cf520a1dbea74a96445..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Mod APK for Human Evolution Clicker Game Unlimited Money and Fun.md +++ /dev/null @@ -1,100 +0,0 @@ -

          Human Evolution Clicker Game Mod APK: A Fun and Educational Simulation


          Have you ever wondered how life evolved on Earth? How did simple organisms become complex and diverse creatures? How did humans emerge from the animal kingdom? If you are curious about these questions, then you will love Human Evolution Clicker Game, a fun and educational simulation game that lets you explore the history of life on our planet.


          Human Evolution Clicker Game is a game that combines idle clicker and evolution mechanics. You can create different creatures by combining two of the same species, and watch them evolve over time. You can also tap on the screen to speed up the evolution process, and discover new forms of life. You can start from the simplest bacteria, and evolve into fish, reptiles, mammals, birds, humans, and even aliens!


          But what if you want to enjoy the game without any limitations or interruptions? What if you want to have unlimited money and gems, no ads, faster evolution, and all stages and creatures unlocked? Well, there is a way to do that, and it is by downloading Human Evolution Clicker Game Mod APK. In this article, we will tell you what Human Evolution Clicker Game Mod APK is, why you should download it, and how to download and install it on your Android device.


          What is Human Evolution Clicker Game?


          Human Evolution Clicker Game is a game developed by Banana4apps, a studio that specializes in casual and simulation games. The game was released in 2018, and has since gained over 10 million downloads on Google Play Store. The game has a rating of 4.5 out of 5 stars, based on more than 500 thousand reviews.


          The game is suitable for all ages, as it is not only entertaining, but also educational. You can learn about the different stages of evolution, such as the ocean stage, the land stage, the dinosaur stage, the mammal stage, the primate stage, the human stage, and the future stage. You can also learn about the different creatures that existed in each stage, such as trilobites, sharks, dinosaurs, mammoths, monkeys, cavemen, cyborgs, and more.


          The game has simple graphics and sound effects, but they are colorful and charming. The game also has a humorous tone, as you can create funny and absurd combinations of creatures. For example, you can combine a fish and a bird to create a flying fish, or a human and a cow to create a minotaur. The game does not take itself too seriously, and encourages you to experiment and have fun.


          How to play Human Evolution Clicker Game


          The gameplay of Human Evolution Clicker Game is very easy and intuitive. You just need to tap on the screen to create new creatures. You can drag two creatures of the same species to combine them into a new one. You can also tap on the creatures to make them evolve faster. You can use money and gems to buy upgrades and boosters that will help you progress faster.
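
        The combine-to-evolve rule is easy to model in code: merging two creatures of the same stage yields one creature of the next stage. A toy sketch, with invented stage names standing in for the game's actual evolution chain:

            # Invented stand-ins for the game's evolution chain.
            STAGES = ["bacteria", "fish", "amphibian", "reptile", "mammal", "human"]

            def merge(a: str, b: str) -> str:
                if a != b:
                    raise ValueError("only two creatures of the same species can combine")
                i = STAGES.index(a)
                return STAGES[min(i + 1, len(STAGES) - 1)]  # the final stage caps the chain

            print(merge("bacteria", "bacteria"))  # -> fish
            print(merge("mammal", "mammal"))      # -> human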


          The game has different stages that represent different eras of evolution. You can unlock new stages by reaching certain milestones in each stage. For example, to unlock the land stage, you need to create an amphibian creature in the ocean stage. To unlock the dinosaur stage, you need to create a reptile creature in the land stage. And so on.


          The game also has different planets that represent different scenarios of evolution. You can unlock new planets by reaching certain levels in each stage. For example, to unlock Mars, you need to reach level 10 in the future stage. To unlock Pluto, you need to reach level 20 in the future stage. And so on.


          The game has no end goal or final boss. You can play as long as you want, and create as many creatures as you want. You can also reset your progress and start over with a new planet or stage. The game is designed to be relaxing and enjoyable, not stressful or competitive.


          Why download Human Evolution Clicker Game Mod APK?


          Human Evolution Clicker Game is a free game, but it has some limitations and drawbacks that might affect your gaming experience. For example, the game has ads that pop up every now and then, which can be annoying and distracting. The game also has in-app purchases that require you to spend real money to buy more money and gems, which are the main currencies of the game. The game also has some stages and creatures that are locked, and you need to reach certain levels or pay money to unlock them.


          If you want to avoid these problems, and enjoy the game to the fullest, then you should download Human Evolution Clicker Game Mod APK. This is a modified version of the game that gives you several benefits and advantages, such as:


          Unlimited money and gems


          With Human Evolution Clicker Game Mod APK, you will have unlimited money and gems in your account. You can use them to buy any upgrade or booster you want, without worrying about running out of resources. You can also use them to unlock any stage or creature you want, without having to wait or grind for levels. You can enjoy the game at your own pace, and create any creature you can imagine.


          No ads and faster evolution


          With Human Evolution Clicker Game Mod APK, you will not see any ads in the game. You can play the game without any interruption or distraction. You can also enjoy a faster evolution process, as you can tap on the screen as much as you want, without any cooldown or limit. You can evolve your creatures in seconds, and see the results of your actions instantly.


          All stages and creatures unlocked


          With Human Evolution Clicker Game Mod APK, you will have access to all the stages and creatures in the game. You can explore the different eras of evolution, from the ocean stage to the future stage. You can also create any creature you want, from bacteria to aliens. You can mix and match different species, and see what happens. You can have fun with the game's humor and creativity.


          How to download and install Human Evolution Clicker Game Mod APK?


          If you are interested in downloading Human Evolution Clicker Game Mod APK, then you need to follow these simple steps:


          Step 1: Enable unknown sources


          Before you can install Human Evolution Clicker Game Mod APK on your Android device, you need to enable unknown sources in your settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device's settings, then security, then unknown sources. Turn on the switch or check the box that says "allow installation of apps from unknown sources".


          Step 2: Download the APK file


          Next, you need to download the APK file of Human Evolution Clicker Game Mod APK from a reliable source. You can use this link to download the file directly to your device. Alternatively, you can use your computer to download the file, then transfer it to your device via USB cable or Bluetooth.
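
        Whichever source you use, it is worth checking that the file arrived intact before installing it. A minimal sketch that computes the file's SHA-256 digest, which you can compare against a reference hash if the download page publishes one; the file name is a placeholder:

            import hashlib

            def sha256_of(path: str) -> str:
                h = hashlib.sha256()
                with open(path, "rb") as f:
                    for chunk in iter(lambda: f.read(8192), b""):
                        h.update(chunk)
                return h.hexdigest()

            # Placeholder file name, purely illustrative.
            print(sha256_of("human-evolution-clicker-mod.apk"))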


          Step 3: Install the APK file


          Once you have downloaded the APK file, you need to locate it on your device's storage. You can use a file manager app to find it easily. Then, tap on the file to start the installation process. Follow the instructions on the screen to complete the installation.


          Step 4: Enjoy the game


          After installing Human Evolution Clicker Game Mod APK on your device, you can launch it from your app drawer or home screen. You can start playing the game right away, with all the benefits and features of the modded version. Have fun creating and evolving different creatures!


          Conclusion


          Human Evolution Clicker Game is a fun and educational simulation game that lets you explore the history of life on Earth. You can create different creatures by combining two of the same species, and watch them evolve over time. You can also tap on the screen to speed up the evolution process, and discover new forms of life.


          If you want to enjoy the game without any limitations or interruptions, then you should download Human Evolution Clicker Game Mod APK. This is a modified version of the game that gives you unlimited money and gems, no ads, faster evolution, and all stages and creatures unlocked. You can download and install Human Evolution Clicker Game Mod APK by following these simple steps:

        • Enable unknown sources in your device's settings.
        • Download the APK file from this link or transfer it from your computer.
        • Install the APK file by tapping on it and following the instructions.
        • Launch the game from your app drawer or home screen and enjoy!

          Human Evolution Clicker Game Mod APK is a great way to have fun and learn about the history of life on Earth. You can create and evolve different creatures, from bacteria to aliens, and see how they change over time. You can also enjoy unlimited money and gems, no ads, faster evolution, and all stages and creatures unlocked. Download Human Evolution Clicker Game Mod APK today and start your evolutionary adventure!


          FAQs


          Here are some frequently asked questions about Human Evolution Clicker Game Mod APK:

        1. Is Human Evolution Clicker Game Mod APK safe to download and install?

          Yes, Human Evolution Clicker Game Mod APK is safe to download and install, as long as you use a reliable source like this link. The modded version does not contain any viruses or malware that could harm your device or compromise your privacy. However, you should always be careful when downloading and installing apps from unknown sources, and scan them with an antivirus app before opening them.

        2. Does Human Evolution Clicker Game Mod APK require root access?

          No, Human Evolution Clicker Game Mod APK does not require root access to work. You can install and play the game on any Android device, without having to root it or modify its system settings. However, if you have a rooted device, you can still use the modded version without any problems.

        3. Will Human Evolution Clicker Game Mod APK affect my original game progress?

          No, Human Evolution Clicker Game Mod APK will not affect your original game progress. The modded version will create a separate folder in your device's storage, where it will store its own data and files. You can play both the original and the modded version of the game on the same device, without any conflicts or issues. However, you should not use the same account or Google Play Games login for both versions, as this might cause some errors or glitches.

        4. Can I update Human Evolution Clicker Game Mod APK?

          No, you cannot update Human Evolution Clicker Game Mod APK through the Google Play Store or any other official source. The modded version is not compatible with the latest updates of the original game, and might stop working or crash if you try to update it. If you want to use the latest version of the modded game, you need to download and install it again from this link or another reliable source.

        5. Can I play Human Evolution Clicker Game Mod APK online or offline?

          You can play Human Evolution Clicker Game Mod APK both online and offline. The game does not require an internet connection to work, and you can play it anywhere and anytime you want. However, some features of the game might require an internet connection, such as saving your progress to the cloud, accessing social media features, or watching optional video ads to earn extra rewards.

          \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Nikmati Game Slither Io Mod Apk dengan Fitur Kebal dan Anti Mati.md b/spaces/congsaPfin/Manga-OCR/logs/Nikmati Game Slither Io Mod Apk dengan Fitur Kebal dan Anti Mati.md deleted file mode 100644 index b47d393dd800391af376052d715e0388c78e3970..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Nikmati Game Slither Io Mod Apk dengan Fitur Kebal dan Anti Mati.md +++ /dev/null @@ -1,87 +0,0 @@ -

          Download Slither Io Mod Apk Tidak Bisa Mati: A Fun and Addictive Snake Game


          Do you remember the classic game of Snake that you used to play on your old Nokia phone? Do you want to relive that nostalgia with a modern twist? Do you want to play a game that is simple, fun, and addictive? If you answered yes to any of these questions, then you should try Slither Io, a game that has taken the internet by storm. And if you want to make the game even more exciting, you should download Slither Io Mod Apk Tidak Bisa Mati, a modified version of the game that gives you unlimited coins, skins, and immortality. In this article, we will tell you everything you need to know about Slither Io and Slither Io Mod Apk, including how to download, install, play, and win the game.


          What Is Slither Io?

          Slither Io is a casual arcade game that is inspired by the very popular game Snakes, seen on the Nokia feature phones. It is one of the best spin-offs we have seen recently. It is also a massively multiplayer online (MMO) game, which means anyone on the internet can play with you at the same time. And it is a competitive game where you are trying to beat the other snakes and grow longer.


          A Modern Recreation of Snake


          Slither Io is a game that recreates the classic Snake game with modern graphics and features. You play as a snake in the game, with the objective of growing as long as you can in length. To grow, you need to eat the orbs scattered all around you. The orbs vary in size, from tiny dots to large glowing orbs. The larger the orb, the more it counts in growing your length.


          A Massively Multiplayer Online Game

          Slither Io is a game that you can play through your web browser, or through apps on Android and iOS. You don't even need to register to start playing, making it one of those excellent no-signup websites for daily use. Type a nickname and you're ready to begin. You can play against other people online, who are also playing as snakes. You can see their nicknames and skins, and also their scores on the leaderboard. You can also chat with them using emojis.


          A Competitive Game with Simple Rules

          Slither Io is a game that is easy to play but hard to master. The rules are simple: if your head touches another snake, you will explode and then it's game over. But if others run into you, then they will explode, and you can eat their remains. In Slither Io, you have a chance to win even if you're tiny. You can swerve in front of a much larger player to defeat them, no matter how big you are. The bigger your snake grows, the more difficult gameplay becomes. You need to avoid hitting the walls or other snakes, and you need to be careful of your own tail. The game is very competitive, as you are trying to survive and grow longer than the others. You can also see your rank on the leaderboard, which shows the top 10 players in the game. You can also see your own score and length on the bottom right corner of the screen.


          What Is Slither Io Mod Apk?


          Slither Io Mod Apk is a modified version of the original Slither Io game, which gives you some extra features and advantages. It is a game that you can download and install on your Android device, and play offline or online. It is a game that lets you customize your snake with unlimited coins and skins, and also gives you immortality and anti-death features.


          A Modified Version of the Original Game


          Slither Io Mod Apk is a game that is created by third-party developers, who have tweaked the original game code to add some new features and functions. It is not an official version of the game, and it is not endorsed by the original developers. Therefore, it may have some bugs or glitches, and it may not be compatible with all devices or updates. However, it is a game that many players enjoy, as it gives them more options and fun.


          A Game with Unlimited Coins and Skins


          Slither Io Mod Apk is a game that gives you unlimited coins, which you can use to buy different skins for your snake. Skins are the colors and patterns that you can choose for your snake, to make it look more unique and attractive. There are many skins available in the game, such as rainbow, flag, candy, animal, emoji, and more. You can also create your own custom skin by mixing different colors and patterns. With Slither Io Mod Apk, you can unlock all the skins for free, and change them anytime you want.


          A Game with Immortality and Anti-Death Features


          Slither Io Mod Apk is a game that gives you immortality, which means you can never die in the game. You can touch other snakes or walls without exploding, and you can also go through yourself without any harm. This makes the game much easier and less frustrating, as you don't have to worry about losing your progress or score. You can also use the anti-death feature, which lets you revive yourself after dying. You can choose to respawn at your previous location, or at a random location on the map. This way, you can continue playing without starting over.


          How to Download and Install Slither Io Mod Apk?


          Slither Io Mod Apk is a game that you can download and install on your Android device easily. Here are the steps to follow:


          Download the Apk File from a Trusted Source


          The first step is to download the apk file of Slither Io Mod Apk from a trusted source. You can search for it on Google or any other search engine, or you can use this link to download it directly. Make sure you download the latest version of the game, which is 1.6 as of June 2023.


          Enable Unknown Sources on Your Device


          The next step is to enable unknown sources on your device, which allows you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources. Turn on the toggle switch to allow unknown sources. You may see a warning message that says installing from unknown sources may harm your device or data. Ignore this message and tap OK.


          Install the Apk File and Launch the Game


          The final step is to install the apk file and launch the game. To do this, go to your file manager app, then locate the downloaded apk file of Slither Io Mod Apk. Tap on it to start the installation process. You may see a pop-up window that asks for your permission to install the app. Tap on install and wait for a few seconds until the installation is complete. Then tap on open to launch the game.


          How to Play Slither Io Mod Apk?


          Slither Io Mod Apk is a game that is very easy to play, but also very challenging and addictive. Here are some tips on how to play it:


          Choose Your Nickname and Skin


          The first thing you need to do when you launch the game is to choose your nickname and skin. You can type any name you want in the nickname box, or leave it blank if you prefer. Then you can choose your skin from the list of available skins, or create your own custom skin by mixing different colors and patterns. You can also change your skin anytime during the game by tapping on the change skin button on the bottom left corner of the screen.


          Control Your Snake with One or Two Fingers


          The next thing you need to do is to control your snake with one or two fingers. You can use one finger to swipe on the screen to move your snake in any direction. You can also use two fingers to tap on the left or right side of the screen to turn your snake left or right. The more you move, the more you lose some length, so be careful not to waste too much energy. You can also use the boost button on the bottom right corner of the screen to make your snake move faster, but this will also make you lose more length.


          Eat Orbs and Kill Other Snakes to Grow Bigger


          The main objective of the game is to eat orbs and kill other snakes to grow bigger. You can eat the orbs that are scattered all over the map, or the ones that are left behind by other snakes when they die. The more orbs you eat, the longer and thicker you become. You can also kill other snakes by making them run into your body. When they explode, you can eat their remains and grow even bigger. However, be careful not to run into other snakes yourself, or you will die too. Unless you are playing with Slither Io Mod Apk, which gives you immortality and anti-death features.


          How to Win Slither Io Mod Apk?


          Slither Io Mod Apk is a game that is very competitive and challenging, even with the extra features and advantages. Here are some strategies on how to win it:


          Use Boost to Dash and Dine


          One of the best ways to win Slither Io Mod Apk is to use boost to dash and dine. Boosting is when you make your snake move faster by pressing the boost button on the bottom right corner of the screen. This will make you lose some length, but it will also give you an edge over other snakes. You can use boost to dash in front of other snakes and make them run into you, or to dine on their remains after they die. You can also use boost to escape from dangerous situations, such as being surrounded by bigger snakes or being chased by faster ones.


          Coil Around Orbs or Smaller Snakes


          Another way to win Slither Io Mod Apk is to coil around orbs or smaller snakes. Coiling is when you make your snake form a circle around something or someone, trapping them inside. This will prevent them from escaping or moving freely, and it will also give you a chance to eat them or their orbs. You can coil around large orbs that are worth more points, or smaller snakes that are easier to catch. However, be careful not to coil around bigger snakes that can break free from your coil, or other snakes that can coil around you.


          Escape from Coils or Traps


          The last way to win Slither Io Mod Apk is to escape from coils or traps. Coils and traps are when other snakes try to encircle you or block your way, making you unable to move or escape. This will put you in a dangerous position, as you may run out of space or run into other snakes. To escape from coils or traps, you need to be quick and smart. You can use boost to dash out of a coil before it closes, or find a gap in a trap before it blocks you. You can also use the anti-death feature of Slither Io Mod Apk, which lets you revive yourself after dying.


          Conclusion


          Slither Io Mod Apk Tidak Bisa Mati is a fun and addictive snake game that lets you play with unlimited coins, skins, and immortality. It is a game that is easy to play but hard to master, as you need to avoid other snakes and grow longer than them. It is a game that is suitable for all ages and preferences, as you can customize your snake and chat with other players online. It is a game that you can download and install on your Android device easily, and play offline or online anytime you want.


          FAQs


          Here are some frequently asked questions about Slither Io Mod Apk:

        Q: Is Slither Io Mod Apk safe?
        A: Slither Io Mod Apk is generally safe, as long as you download it from a trusted source and enable unknown sources on your device. However, it may have some bugs or glitches, and it may not be compatible with all devices or updates.

        Q: Is Slither Io Mod Apk legal?
        A: Slither Io Mod Apk is not legal, as it violates the terms and conditions of the original game. It is also not fair to other players who play the game without any modifications. Therefore, we do not recommend using Slither Io Mod Apk, and we are not responsible for any consequences that may arise from using it.

        Q: Is Slither Io Mod Apk online or offline?
        A: Slither Io Mod Apk can be played both online and offline. You can play online with other players who are also using the mod apk, or you can play offline with bots. However, playing online may expose you to some risks, such as being banned by the original game or getting viruses from untrusted sources.

        Q: How to update Slither Io Mod Apk?
        A: To update Slither Io Mod Apk, you need to download the latest version of the apk file from a trusted source and install it on your device. You may need to uninstall the previous version of the game before installing the new one. You may also need to enable unknown sources on your device again if you have disabled it.

        Q: How to uninstall Slither Io Mod Apk?
        A: To uninstall Slither Io Mod Apk, you need to go to your device settings, then apps, then find Slither Io Mod Apk and tap on it. Then tap on uninstall and confirm your choice. You may also need to delete the apk file from your file manager app if you still have it.

        Q: How to get Slither Io Mod Apk for iOS?
        A: Unfortunately, there is no Slither Io Mod Apk for iOS devices, as iOS does not support apk files. You can only play the original Slither Io game on iOS devices, which you can download from the App Store.

          \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Play Last Island of Survival on PC with Emulator A Guide to the Ultimate Survival Sandbox Game.md b/spaces/congsaPfin/Manga-OCR/logs/Play Last Island of Survival on PC with Emulator A Guide to the Ultimate Survival Sandbox Game.md deleted file mode 100644 index 39b07a9120e98dd39b387298b8335b2f0c3209a9..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Play Last Island of Survival on PC with Emulator A Guide to the Ultimate Survival Sandbox Game.md +++ /dev/null @@ -1,100 +0,0 @@ -

          How to Download and Play Last Island of Survival on Your Laptop


          Last Island of Survival is a multiplayer zombie survival game that challenges you to scavenge resources, craft weapons, build shelter, and fight against other players and the undead in a post-apocalyptic island. The game is available for Android and iOS devices, but what if you want to play it on your laptop?


          Playing Last Island of Survival on your laptop has many advantages, such as a bigger screen, improved visibility, better controls, longer battery life, and no interruptions. In this article, we will show you three methods to download and play Last Island of Survival on your laptop using different tools and platforms. Let's get started!


          Method 1: Using BlueStacks Android emulator


          BlueStacks is a popular Android emulator that lets you run Android apps and games on your PC or Mac. It has many features that enhance your gaming experience, such as Eco Mode, Macros, Script, Multi-Instance, Multi-Instance Sync, Real-Time Translation, and more. Here are the steps to use BlueStacks to play Last Island of Survival on your laptop:

        1. Download and install BlueStacks from here.
        2. Launch BlueStacks and sign in to Google Play Store with your Google account.
        3. Search for Last Island of Survival in the search bar.
        4. Click on the game icon and install it.
        5. Once the installation is complete, click on the game icon on the home screen to start playing.

          You can customize the game controls using your mouse and keyboard, or use the default ones provided by BlueStacks. You can also sync your game progress across devices with a single sign-in to your Google account.


          Method 2: Using Google Play Games on PC


          Google Play Games is a PC application that lets you browse, download, and play select mobile games on Google's gaming platform for PC. You can enjoy bigger, bolder versions of your favorite Android games on your laptop with improved performance and graphics. Here are the steps to use Google Play Games to play Last Island of Survival on your laptop:

        1. Download and install Google Play Games from here.
        2. Launch Google Play Games and sign in with your Google account.
        3. Browse the available games and look for Last Island of Survival.
        4. Click on the game icon and install it.
        5. Once the installation is complete, click on the game icon to start playing.

          You can use your mouse and keyboard to play the game with more agility and accuracy. You can also sync your game progress and library across devices with a single sign-in to your Google account.


          Method 3: Using Your Phone app


          Your Phone is a Windows app that lets you link your PC and your Android phone and access your phone's apps on your PC. You can use it to play Last Island of Survival on your laptop without downloading any additional software. Here are the steps to use Your Phone app to play Last Island of Survival on your laptop:

        1. Download and install Your Phone app from here.
        2. Launch Your Phone app and follow the instructions to link your PC and your Android phone.
        3. On your phone, open Settings > Apps & notifications > Advanced > Default apps > Opening links > Your Phone Companion > Open supported links > In this app.
        4. On your PC, open Your Phone app and click on Apps.
        5. Select Last Island of Survival from the list of apps on your phone.
        6. The game will open in a separate window on your PC. You can use your mouse or touchpad to play the game.

        Conclusion

            Last Island of Survival is a thrilling and addictive zombie survival game that you can enjoy on your laptop with any of the three methods we have shown you. Whether you use BlueStacks, Google Play Games, or Your Phone app, you can experience the game with better graphics, controls, and performance than on your mobile device. You can also sync your game progress and achievements across devices with your Google account.


            Now that you know how to download and play Last Island of Survival on your laptop, you can explore the island, craft weapons, build shelter, and fight for survival with other players and zombies. Be careful, though, as the island is full of dangers and surprises. You never know what you might encounter next.


            We hope you found this article helpful and informative. If you have any questions or feedback, please let us know in the comments below. Happy gaming!


            FAQs

        Is Last Island of Survival free to play?

        Yes, Last Island of Survival is free to play on Android and iOS devices. However, it may contain ads and in-app purchases that cost real money.

        Can I play Last Island of Survival offline?

        No, Last Island of Survival requires an internet connection to play. You need to connect to the game server and interact with other players online.

        How can I save my game progress in Last Island of Survival?

        You can save your game progress in Last Island of Survival by signing in to your Google account on your device. This way, you can sync your game data across devices and platforms.

        How can I change the language in Last Island of Survival?

        You can change the language in Last Island of Survival by going to Settings > Language and selecting your preferred language from the list.

        How can I report a bug or a problem in Last Island of Survival?

        You can report a bug or a problem in Last Island of Survival by going to Settings > Feedback and filling out the form with your issue and contact information.

            \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/RESPEM PA POU FAKE - KINGSTREET ft. IZOLAN MP3 and Streaming Online.md b/spaces/congsaPfin/Manga-OCR/logs/RESPEM PA POU FAKE - KINGSTREET ft. IZOLAN MP3 and Streaming Online.md deleted file mode 100644 index dd21f6cc92c566744e871441d00121b232ddf956..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/RESPEM PA POU FAKE - KINGSTREET ft. IZOLAN MP3 and Streaming Online.md +++ /dev/null @@ -1,92 +0,0 @@ -
            -

            How to Download RespeM Pa Pou Fake by King Street and Izolan

            -

            If you are a fan of Haitian music, you may have heard of the song RespeM Pa Pou Fake by King Street and Izolan. This song is a hit among Haitians and non-Haitians alike, thanks to its catchy beat, witty lyrics, and powerful message. In this article, we will tell you what RespeM Pa Pou Fake is, why you should download it, and how to do it legally and safely.

-

            What is RespeM Pa Pou Fake?

            -

RespeM Pa Pou Fake is a Haitian Creole phrase that means "respect is not for fake". It is also the title of a song by King Street, a Haitian rap group, featuring Izolan, a Haitian rapper and singer. The song was released in March 2023 as a single from King Street's album of the same name.

            -

            A popular Haitian song

            -

            RespeM Pa Pou Fake is one of the most popular Haitian songs of 2023. It has over 8 million views on YouTube, over 175 thousand downloads on Dyaspora Mizik, and over 120 thousand streams on Boomplay. It has also been featured on various playlists and radio stations, such as Best Haitian Music 2023, DJAKOUT #1 Habitude, and KADO de Ti Lunet Power of Love.

            -

            A collaboration between King Street and Izolan

            -

            RespeM Pa Pou Fake is a collaboration between King Street and Izolan, two of the most prominent figures in the Haitian music scene. King Street is a rap group composed of four members: Pedro Force, Maestro Jolicoeur, Bourik the Latalay, and Esther Surpris. They are known for their songs that mix rap, kompa, reggae, and R&B. Izolan is a rapper and singer who rose to fame as a member of Barikad Crew, a rap group that was influential in the Haitian hip-hop movement. He is also a solo artist who has released several albums and singles.

            -

            A message of respect and authenticity

            -

            RespeM Pa Pou Fake is not just a catchy song, but also a message of respect and authenticity. The song challenges the listeners to be true to themselves and not to pretend to be someone they are not. It also urges them to respect others who are genuine and honest, and not to fall for fake people who lie and deceive. The song reflects the values and struggles of many Haitians who face poverty, corruption, violence, and discrimination in their country.

            -

            Why download RespeM Pa Pou Fake?

            -

            There are many reasons why you should download RespeM Pa Pou Fake. Here are some of them:

-

            To enjoy the catchy beat and lyrics

            -

RespeM Pa Pou Fake has a catchy beat that will make you want to dance and sing along. The song combines rap, kompa, reggae, and R&B elements to create a unique sound that appeals to different tastes. The lyrics are witty, clever, and humorous, using wordplay, metaphors, and references to convey the message of the song. Some of the memorable lines are:

- RespeM pa pou fake, respeM pou moun ki gen lavi (Respect is not for fake, respect for people who have life)
- Mwen pa bezwen moun ki pa renmen mwen, mwen bezwen moun ki renmen mwen pou sa mwen ye (I don't need people who don't love me, I need people who love me for who I am)
- Mwen pa janm chanje, se moun yo ki chanje, se fake yo ki ap monte (I never change, it's the people who change, it's the fake ones who are rising)

            -

            To support Haitian music and artists

            -

            By downloading RespeM Pa Pou Fake, you are also supporting Haitian music and artists. Haiti has a rich and diverse musical heritage that reflects its history, culture, and identity. Haitian music is influenced by African, European, Caribbean, and American styles, such as vodou, kompa, rara, rap kreyol, zouk, reggae, and jazz. Haitian music is also a form of expression and resistance against oppression and injustice. By downloading RespeM Pa Pou Fake, you are showing appreciation and solidarity for Haitian music and artists who work hard to create quality content and represent their country.

            -

            To learn more about Haitian culture and history

            -

            Another reason to download RespeM Pa Pou Fake is to learn more about Haitian culture and history. Haiti is a country with a rich and complex history that has shaped its culture and society. Haiti was the first independent black republic in the world, after a successful slave revolt against French colonial rule in 1804. Haiti has also faced many challenges and difficulties, such as political instability, natural disasters, poverty, and foreign intervention. Haiti has a vibrant and diverse culture that includes art, literature, religion, cuisine, sports, and music. By downloading RespeM Pa Pou Fake, you are exposing yourself to a glimpse of Haitian culture and history that you may not find elsewhere.

            -

            How to download RespeM Pa Pou Fake legally and safely?

            -

            Now that you know what RespeM Pa Pou Fake is and why you should download it, you may be wondering how to do it legally and safely. There are many ways to download RespeM Pa Pou Fake online, but not all of them are legal or safe. Some websites or programs may contain viruses, malware, or spyware that can harm your device or compromise your privacy. Some websites or programs may also violate the intellectual property rights of the artists or the producers of the song. To avoid these risks and respect the work of the creators, here are some ways to download RespeM Pa Pou Fake legally and safely:

            -

            Buy the song from iTunes or Google Play

            -

            One of the easiest and safest ways to download RespeM Pa Pou Fake is to buy it from iTunes or Google Play. These are official platforms that offer high-quality downloads of songs for a reasonable price. You can also access other features such as lyrics, album art, ratings, reviews, and playlists. By buying the song from iTunes or Google Play, you are also supporting the artists financially and legally.

            -

            Stream the song from YouTube or Spotify

            -

            Another way to download RespeM Pa Pou Fake is to stream it from YouTube or Spotify. These are popular platforms that offer free access to millions of songs online. You can also enjoy other benefits such as videos, subtitles, recommendations, social media integration, and offline mode. However, streaming the song from YouTube or Spotify does not mean that you own the song or that you can download it to your device. You need an internet connection to access the song and you may encounter ads or interruptions. If you want to download the song to your device, you need to use a third-party website or program that can convert YouTube videos or Spotify songs to MP3 files.

            -

            Use a reliable and secure website or program to convert YouTube videos to MP3 files

            -

            If you want to download RespeM Pa Pou Fake from YouTube as an MP3 file, you need to use a reliable and secure website or program that can convert YouTube videos to MP3 files. There are many websites and programs that offer this service, but not all of them are trustworthy or safe. Some of them may contain viruses, malware, spyware, pop-ups, or other unwanted elements that can harm your device or compromise your privacy. Some of them may also have low-quality conversions, slow speeds, limited options, or hidden fees. To avoid these problems, here are some of the best websites and programs that can convert YouTube videos to MP3 files legally and safely:

            -

            Dirpy

            -

            Dirpy is a website that allows you to download YouTube videos as MP3 files for free. It has a simple and user-friendly interface that lets you paste the URL of the YouTube video you want to download, choose the audio quality and format, edit the ID3 tags, and download the file. Dirpy also has a bookmarklet that you can add to your browser for easy access. Dirpy respects the intellectual property rights of the content owners and only allows downloads of videos that are in the public domain or have a Creative Commons license.

            -

            Motionbox's YouTube to MP3

            -

            Motionbox's YouTube to MP3 is a website that allows you to download YouTube videos as MP3 files for free. It has a sleek and modern interface that lets you paste the URL of the YouTube video you want to download, choose the audio quality and format, and download the file. Motionbox's YouTube to MP3 also has a Chrome extension that you can install for easy access. Motionbox's YouTube to MP3 is fast, secure, and reliable, and does not require any registration or installation.

            -

            Free MP3 Hunter

            -

            Free MP3 Hunter is a website that allows you to download YouTube videos as MP3 files for free. It has a simple and minimalist interface that lets you paste the URL of the YouTube video you want to download, choose the audio quality and format, and download the file. Free MP3 Hunter also has an app that you can download for Android devices. Free MP3 Hunter is safe, efficient, and convenient, and does not have any ads or pop-ups.
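
If you are comfortable with a small script instead of a website, the open-source tool yt-dlp can do the same conversion locally. This is our own addition, not one of the article's picks; it assumes you have installed yt-dlp (`pip install yt-dlp`) and FFmpeg, that you only download videos you have the rights to, and the URL below is a placeholder.

```python
import yt_dlp

# Minimal sketch: grab the best audio track of a video and
# re-encode it to MP3 via FFmpeg.
opts = {
    "format": "bestaudio/best",
    "outtmpl": "%(title)s.%(ext)s",   # save as "<video title>.mp3"
    "postprocessors": [{
        "key": "FFmpegExtractAudio",
        "preferredcodec": "mp3",
        "preferredquality": "192",
    }],
}
with yt_dlp.YoutubeDL(opts) as ydl:
    ydl.download(["https://www.youtube.com/watch?v=VIDEO_ID"])
```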

            -

            Conclusion

            -

            RespeM Pa Pou Fake by King Street and Izolan is a popular Haitian song that has a catchy beat, witty lyrics, and a powerful message of respect and authenticity. It is also a way to enjoy, support, and learn more about Haitian music and culture. To download RespeM Pa Pou Fake legally and safely, you can buy it from iTunes or Google Play, stream it from YouTube or Spotify, or use a reliable and secure website or program to convert YouTube videos to MP3 files. We hope this article has helped you find the best way to download RespeM Pa Pou Fake by King Street and Izolan.

            -

            FAQs

            -

            Here are some frequently asked questions about RespeM Pa Pou Fake by King Street and Izolan:

            -
1. What does RespeM Pa Pou Fake mean?

  RespeM Pa Pou Fake is a Haitian Creole phrase that means "respect is not for fake". It is also the title of a song by King Street and Izolan.

2. Who are King Street and Izolan?

  King Street is a Haitian rap group composed of four members: Pedro Force, Maestro Jolicoeur, Bourik the Latalay, and Esther Surpris. Izolan is a Haitian rapper and singer who was formerly a member of Barikad Crew.

3. When was RespeM Pa Pou Fake released?

  RespeM Pa Pou Fake was released in March 2023 as a single from King Street's album of the same name.

4. How popular is RespeM Pa Pou Fake?

  RespeM Pa Pou Fake is one of the most popular Haitian songs of 2023. It has over 8 million views on YouTube, over 175 thousand downloads on Dyaspora Mizik, and over 120 thousand streams on Boomplay. It has also been featured on various playlists and radio stations, such as Best Haitian Music 2023, DJAKOUT #1 Habitude, and KADO de Ti Lunet Power of Love.

5. How can I download RespeM Pa Pou Fake legally and safely?

  You can download RespeM Pa Pou Fake legally and safely by buying it from iTunes or Google Play, streaming it from YouTube or Spotify, or using a reliable and secure website or program to convert YouTube videos to MP3 files, such as Dirpy, Motionbox's YouTube to MP3, or Free MP3 Hunter.

  -

            \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/X Explorer File Manager Pro - Get the Donate Version for Free with MOD APK.md b/spaces/congsaPfin/Manga-OCR/logs/X Explorer File Manager Pro - Get the Donate Version for Free with MOD APK.md deleted file mode 100644 index 56b35f849413479cf5c723ea93e24057628894c2..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/X Explorer File Manager Pro - Get the Donate Version for Free with MOD APK.md +++ /dev/null @@ -1,110 +0,0 @@ - -

            X Explorer Mod APK: A Powerful File Manager for Android

            -

            If you are looking for a file manager app that can do more than just browsing and copying files, you might want to check out X Explorer Mod APK. This is a modified version of X-Plore File Manager, a popular app that offers a lot of features and functions for managing your files on your Android device. In this article, we will tell you what X Explorer Mod APK is, what features it has, how to download and install it, and what are the benefits and drawbacks of using it.

            -

            What is X Explorer Mod APK?

            -

X Explorer Mod APK is a modified build of X-Plore File Manager, a file system management utility for Android 2.1+ developed by the Slovak software house Lonely Cat Games. The original app is available for free on Google Play, without even the support of advertising banners; however, it is possible to donate to the developer.

-

            The modded version of X-Plore File Manager, which is also known as X Explorer Mod APK, unlocks some premium features that are otherwise available only to donors. These features include dual-pane mode, root access, cloud storage integration, media player and viewer, archive and encryption support, FTP and SSH client, SQLite database explorer, hex editor and text editor, and more.

            -

            Features of X Explorer Mod APK

            -

            X Explorer Mod APK has a lot of features that make it a powerful and versatile file manager app for Android. Here are some of the main features that you can enjoy with this app:

            -

            Dual-pane mode

            -

            This feature allows you to view two folders or files side by side on your screen. You can easily drag and drop files between the two panes, or compare them with each other. You can also open multiple tabs in each pane for easy navigation.

            -

            Root access

            -

            If you have a rooted device, you can use X Explorer Mod APK to access the system files and folders that are normally hidden or protected. You can modify or delete any file or folder on your device, as well as change permissions and ownership. However, you should be careful when doing this, as you might damage your device or lose your data.

            -

            Cloud storage integration

            -

            X Explorer Mod APK supports various cloud storage services, such as Google Drive, Dropbox, OneDrive, Mega, Box, Yandex Disk, MediaFire, OwnCloud, SugarSync, WebDAV, FTP, and more. You can access your cloud files directly from the app, or upload or download files between your device and the cloud.

            -

            Media player and viewer

            -

            X Explorer Mod APK can play or view various types of media files, such as images, videos, music, documents, PDFs, ebooks, archives, and more. You can also create playlists or slideshows from your media files. The app supports subtitles and streaming for video files.

            -

            Archive and encryption support

            -

X Explorer Mod APK can create or extract various types of archives, such as ZIP, RAR, 7Z, TAR, GZIP, BZIP2, XZ, LZMA, and ISO. You can also encrypt or decrypt your files or folders with the AES-256 algorithm, and password-protect your archives or files.
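
The article does not document which encryption scheme the app uses beyond naming AES-256, so as a rough illustration only, here is a minimal Python sketch of AES-256 file encryption using the third-party cryptography package (an assumption of ours, not the app's actual implementation; the file name is a placeholder):

```python
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM  # pip install cryptography

# AES-256 in GCM mode: a 32-byte key and a unique 12-byte nonce per message.
# Real tools also handle key storage, password derivation, and large files.
key = AESGCM.generate_key(bit_length=256)
nonce = os.urandom(12)
plaintext = open("secret.txt", "rb").read()

ciphertext = AESGCM(key).encrypt(nonce, plaintext, None)
recovered = AESGCM(key).decrypt(nonce, ciphertext, None)
assert recovered == plaintext
```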

-
            FTP and SSH client

            -

            X Explorer Mod APK can connect to FTP or SSH servers and transfer files between your device and the server. You can also browse or edit files on the server. The app supports FTPS (SSL/TLS) and SFTP (SSH File Transfer Protocol) for secure connections.
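
To make the FTP side concrete, here is a minimal Python sketch of what such a client does: connect over FTPS, list a directory, and download a file. The server address, credentials, and file name are placeholders, not anything from the app.

```python
from ftplib import FTP_TLS  # standard library

ftp = FTP_TLS("ftp.example.com")
ftp.login("user", "password")
ftp.prot_p()                 # encrypt the data connection as well
ftp.retrlines("LIST")        # print the remote directory listing
with open("remote.txt", "wb") as f:
    ftp.retrbinary("RETR remote.txt", f.write)
ftp.quit()
```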

            -

            SQLite database explorer

            -

X Explorer Mod APK can view or edit SQLite database files, so you can browse the tables and records that apps store on your device.

-

Hex editor and text editor

-

X Explorer Mod APK can also view, edit, or create files in hexadecimal or text format. You can compare files in binary or text mode, or search for strings or hex values in files. Hex editors are useful for debugging, reverse engineering, data recovery, or modifying files that are not normally editable by other applications.
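
As a rough picture of what a hex view shows, this short Python sketch prints a file the way a hex editor panel would: offset, raw bytes, and their ASCII rendering (the file name is a placeholder).

```python
# Dump the first 64 bytes of a file in hex-editor style.
with open("example.bin", "rb") as f:
    data = f.read(64)

for offset in range(0, len(data), 16):
    chunk = data[offset:offset + 16]
    hex_part = " ".join(f"{b:02x}" for b in chunk)
    text_part = "".join(chr(b) if 32 <= b < 127 else "." for b in chunk)
    print(f"{offset:08x}  {hex_part:<47}  {text_part}")
```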

            -

            How to download and install X Explorer Mod APK?

            -

            If you want to try X Explorer Mod APK on your Android device, you need to follow these steps:

            -
1. Download the X Explorer Mod APK file from a trusted source. You can find the latest version of the file here: [text].
2. Enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
3. Locate the downloaded APK file on your device and tap on it to start the installation process. Follow the instructions on the screen to complete the installation.
4. Launch the X Explorer app from your app drawer and enjoy its features.
            -

            Note: You may need to uninstall the original X-Plore File Manager app before installing the modded version, as they may conflict with each other.
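
If you prefer installing from a computer rather than tapping through the steps above, sideloading over adb achieves the same result. This is our own sketch, not part of the app's instructions; it assumes Android platform-tools are installed and USB debugging is enabled, and the APK file name below is a placeholder for wherever you saved the download.

```python
import subprocess

# Sideload the APK over adb; "-r" reinstalls over an existing copy,
# keeping its data, which also works for updates.
subprocess.run(["adb", "install", "-r", "x-explorer-mod.apk"], check=True)
```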

            -

            Benefits of using X Explorer Mod APK

            -

            X Explorer Mod APK is a great file manager app for Android users who want to have more control and functionality over their files and folders. Some of the benefits of using this app are:

            -
• You can access and manage all your files and folders on your device, including system files and hidden files.
• You can connect to various cloud storage services and transfer files between your device and the cloud.
• You can play or view different types of media files without needing other apps.
• You can create or extract archives, encrypt or decrypt files, password-protect files, and more.
• You can connect to FTP or SSH servers and browse or edit files on the server.
• You can view or edit SQLite databases, hex files, text files, and more.
• You can use dual-pane mode to view two folders or files side by side and compare or copy them easily.
            -

            Drawbacks of using X Explorer Mod APK

            -

            While X Explorer Mod APK is a powerful and versatile file manager app, it also has some drawbacks that you should be aware of before using it. Some of the drawbacks are:

            -
• The app is not available on the Google Play Store, so you need to download it from a third-party source, which may pose some security risks.
• The app may not be compatible with some devices or Android versions, or may cause some bugs or crashes.
• The app may require root access for some features, which may void your warranty or damage your device if done incorrectly.
• The app may be too complex or overwhelming for some users who prefer a simpler or more minimalist interface.
            -

            Conclusion

            -

            X Explorer Mod APK is a file manager app that offers a lot of features and functions for managing your files on your Android device. It allows you to access and modify system files, connect to cloud storage services, play or view media files, create or extract archives, encrypt or decrypt files, connect to FTP or SSH servers, view or edit SQLite databases, hex files, text files, and more. It also has a dual-pane mode that lets you view two folders or files side by side. However, the app is not available on Google Play Store, so you need to download it from a third-party source. It may also not be compatible with some devices or Android versions, or may require root access for some features. The app may also be too complex or overwhelming for some users who prefer a simpler or more minimalist interface. Therefore, you should weigh the pros and cons of using this app before installing it on your device.

            -

            FAQs

            -
1. What is the difference between X Explorer Mod APK and X-Plore File Manager?

  X Explorer Mod APK is a modified version of X-Plore File Manager that unlocks some premium features that are otherwise available only to donors. These features include dual-pane mode, root access, cloud storage integration, media player and viewer, archive and encryption support, FTP and SSH client, SQLite database explorer, hex editor and text editor, and more.

2. Is X Explorer Mod APK safe to use?

  X Explorer Mod APK is generally safe to use if you download it from a trusted source. However, you should always scan the APK file with an antivirus program before installing it on your device. You should also be careful when accessing or modifying system files or folders with this app, as you might damage your device or lose your data.

3. How do I update X Explorer Mod APK?

  You can update X Explorer Mod APK by downloading the latest version of the APK file from the same trusted source and installing it over the existing app.
              \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Zooba The Best Brawl Battle Royale Game - APK Free Download.md b/spaces/congsaPfin/Manga-OCR/logs/Zooba The Best Brawl Battle Royale Game - APK Free Download.md deleted file mode 100644 index a6f4cdf18848f4ef831ab4cffcb74bdd3ffeb989..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Zooba The Best Brawl Battle Royale Game - APK Free Download.md +++ /dev/null @@ -1,117 +0,0 @@ -
              -

              Zooba: Fun Battle Royale Games APK Download

              -

              If you are looking for a fun and exciting online multiplayer shooting game, you should check out Zooba: Fun Battle Royale Games. Zooba is a free-to-play game that combines the best of MOBA and battle royale genres. You can choose from a variety of animal characters, each with their own abilities and playstyles, and fight against other players in an epic brawl. You can download and install Zooba APK on your Android device and enjoy the game anytime, anywhere. In this article, we will tell you more about what Zooba is, why you should play it, and how to download and install it. We will also share some tips and tricks to help you become the ultimate star in this game.

              -

              What is Zooba?

              -

              Zooba is a free online multiplayer shooting game developed by Wildlife Studios. It was released in 2019 and has since gained over 50 million downloads on Google Play Store. Zooba is a unique game that combines the elements of MOBA (multiplayer online battle arena) and battle royale games. In Zooba, you can choose from a variety of animal characters, such as a gorilla, a lion, a fox, a panda, and more. Each character has their own abilities and playstyles, such as melee, ranged, stealth, support, etc. You can also customize your character with different skins and outfits.

-

              In Zooba, you will join an online community of players who are ready for thrilling brawls and non-stop action. You will enter a map with up to 20 players and fight for survival. The map is filled with different items, such as weapons, health kits, shields, crates, etc. You can collect and upgrade these items to increase your firepower and survivability. You can also use the environment to your advantage, such as hiding in bushes, swimming in water, or jumping on trampolines. The map will also shrink over time due to the fire zone, which will force you to move closer to your enemies. The last player or team standing wins the match.

              -

              Features of Zooba

              -

              Zooba has many features that make it an enjoyable and addictive game. Some of these features are:

              -
• Free online multiplayer game for endless hours of fun
• Fast-paced battle royale action with unique shooting mechanics
• Choose from a variety of characters with distinct abilities and playstyles
• Challenge your friends or play solo in intense battles
• Level up and unlock new abilities to dominate the Zooba battlegrounds
• Collect and upgrade weapons for devastating firepower
• Compete in seasonal events and climb the ranks to become the top star
              -

              How to download and install Zooba APK

              -

              If you want to play Zooba on your Android device, you can download and install Zooba APK from APKCombo. APKCombo is a reliable website that offers safe and fast downloads of various APK files. To download and install Zooba APK from APKCombo, follow these steps:

              -
1. Go to the Zooba APK page on APKCombo.
2. Click on the "Download APK" button.
3. Choose a download option (APK or XAPK) and wait for the file to be downloaded.
4. If you downloaded an XAPK file, you will need to install the APKCombo Installer app first to extract the APK file from the downloaded XAPK file. You can download the APKCombo Installer app from [here].
5. Once you have the APK file, locate it on your device and tap on it to install it.
6. Allow the installation of unknown apps if prompted by your device.
7. Wait for the installation to finish and launch the game.
              -
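
One precaution worth adding to the steps above: because the APK does not come from Google Play, it is wise to verify the download against a checksum from a source you trust before installing it. A minimal sketch, where the file name and expected digest are placeholders:

```python
import hashlib

expected = "PASTE_TRUSTED_SHA256_HERE"

# Hash the file in 1 MiB chunks so large APKs don't load into memory at once.
h = hashlib.sha256()
with open("zooba.apk", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("OK to install" if h.hexdigest() == expected else "Checksum mismatch: do not install")
```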

              Congratulations, you have successfully downloaded and installed Zooba APK on your Android device. You can now enjoy the game and have fun with your friends.

              -

              Why play Zooba?

              -

              Zooba is not just another battle royale game. It is a game that offers a unique and fun experience for players of all ages and skill levels. Here are some reasons why you should play Zooba:

              -

              Enjoy a free online multiplayer game

              -

              Zooba is a free-to-play game that does not require any subscription or registration. You can play it anytime, anywhere, as long as you have an internet connection. You can also play it on different devices, such as Android, iOS, and PC. Zooba is a game that you can enjoy without spending any money, although you can also purchase in-game items and premium features if you want to enhance your gameplay.

              -

              Experience fast-paced battle royale action

              -

              Zooba is a game that will keep you on your toes with its fast-paced and dynamic gameplay. You will enter a map with up to 20 players and fight for survival in a 3-5 minute match. You will have to move quickly, collect items, shoot enemies, and avoid the fire zone. You will also have to use your character's abilities and strategies to gain an edge over your opponents. Zooba is a game that will test your skills, reflexes, and wits in an exciting and intense battle royale mode.

-
              Choose from a variety of characters

              -

              Zooba is a game that lets you choose from a variety of animal characters, each with their own abilities and playstyles. You can choose from 23 characters, such as a gorilla, a lion, a fox, a panda, a chameleon, a hippo, and more. Each character has their own strengths and weaknesses, such as melee, ranged, stealth, support, etc. You can also customize your character with different skins and outfits to make them look more cool and unique. Zooba is a game that gives you the freedom to choose your favorite character and express your personality.

              -

              Challenge your friends or play solo

              -

              Zooba is a game that lets you play with your friends or by yourself. You can join or create a clan with your friends and team up with them in matches. You can also chat with them and send them gifts. You can also play solo if you prefer to go solo or practice your skills. Zooba is a game that lets you have fun with your friends or on your own.

              -

              Level up and unlock new abilities

              -

              Zooba is a game that lets you level up and unlock new abilities for your characters. As you play the game, you will earn experience points and coins that you can use to upgrade your character's abilities. Each character has four abilities that you can unlock and improve, such as health, damage, speed, and special skills. You can also unlock new characters by collecting trophies or gems. Zooba is a game that lets you progress and improve your performance in the game.

              -

              Compete in seasonal events and rank up

              -

              Zooba is a game that lets you compete in seasonal events and rank up in the leaderboards. Every season, there are different events that you can participate in, such as tournaments, challenges, missions, etc. These events will reward you with exclusive items, such as skins, outfits, weapons, etc. You can also rank up by earning stars from matches and climb the leaderboards to become the top star in Zooba. Zooba is a game that lets you show off your skills and achievements in the game.

              -

              Tips and tricks for playing Zooba

              -

              If you want to become the ultimate star in Zooba, you need to know some tips and tricks that will help you win more matches and have more fun. Here are some tips and tricks for playing Zooba:

              -

              Choose your character wisely

              -

One of the most important decisions you need to make in Zooba is choosing your character. Each character has their own abilities and playstyles that suit different situations and preferences. You need to choose a character that matches your style and strategy. For example, if you like to be aggressive and deal high damage, you might want to choose a melee character like Bruce or Finn. If you like to be sneaky and snipe your enemies from afar, you might want to choose a ranged character like Nix or Pepper. If you like to be supportive and heal your allies, you might want to choose a support character like Duke or Lizzy. You can also switch your character depending on the map, the mode, and the enemies you are facing. Zooba is a game that lets you experiment with different characters and find your favorite one.

-

              Use the environment to your advantage

              -

Zooba is a game that has a dynamic and interactive environment that you can use to your advantage. You can hide in bushes, swim in water, jump on trampolines, break crates, and more. You can use these features to surprise your enemies, escape from danger, or gain an edge in combat. For example, you can hide in bushes to ambush your enemies or avoid detection. You can swim in water to heal yourself or cross the map faster. You can jump on trampolines to reach higher places or dodge attacks. You can break crates to find weapons or items. Zooba is a game that lets you explore and utilize the environment to your benefit.

-

              Collect and upgrade weapons

              -

Zooba is a game that has a variety of weapons that you can collect and upgrade to increase your firepower and survivability. You can find weapons scattered around the map or inside crates. You can also buy weapons from vending machines or loot them from defeated enemies. You can carry up to two weapons at a time and switch between them as needed. You can also upgrade your weapons by finding duplicates of the same weapon or by using coins. Upgrading your weapons will improve their damage, range, accuracy, and reload speed. Zooba is a game that lets you customize and optimize your weapons for different situations and preferences.

-

              Stay alert and avoid the fire zone

              -

Zooba is a game that requires you to stay alert and avoid the fire zone that will shrink the map over time. The fire zone is a red circle that will appear on the map and gradually close in on the center. If you are caught in the fire zone, you will take damage over time and eventually die. You need to pay attention to the fire zone indicator on the top of the screen and move towards the safe zone as soon as possible. You also need to watch out for other players who might try to ambush you or block your way. Zooba is a game that tests your awareness and survival skills in a shrinking battleground.

-

              Conclusion

              -

              Zooba is a fun and exciting online multiplayer shooting game that combines the best of MOBA and battle royale genres. You can choose from a variety of animal characters, each with their own abilities and playstyles, and fight against other players in an epic brawl. You can download and install Zooba APK on your Android device and enjoy the game anytime, anywhere. Zooba is a game that offers a unique and fun experience for players of all ages and skill levels. If you are looking for a free online multiplayer game that will keep you entertained and challenged, you should try Zooba: Fun Battle Royale Games.

              -

              FAQs

              -

              Here are some frequently asked questions about Zooba:

              -
• Q: Is Zooba free to play?
  A: Yes, Zooba is free to play and does not require any subscription or registration. However, you can also purchase in-game items and premium features if you want to enhance your gameplay.

• Q: How many players can play in one match?
  A: Zooba supports up to 20 players in one match, either solo or in teams of 2 or 4.

• Q: How long does one match last?
  A: One match lasts for about 3-5 minutes, depending on how fast the fire zone shrinks and how many players are left.

• Q: What are the minimum requirements for playing Zooba on Android?
  A: Zooba requires Android 6.0 or higher and at least 2 GB of RAM to run smoothly.

• Q: How can I contact the developers of Zooba?
  A: You can contact the developers of Zooba by sending an email to support@wildlifestudios.com or by visiting their website at https://wildlifestudios.com/.

              \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/image/photometric.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/image/photometric.py deleted file mode 100644 index 5085d012019c0cbf56f66f421a378278c1a058ae..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/image/photometric.py +++ /dev/null @@ -1,428 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import cv2 -import numpy as np - -from ..utils import is_tuple_of -from .colorspace import bgr2gray, gray2bgr - - -def imnormalize(img, mean, std, to_rgb=True): - """Normalize an image with mean and std. - - Args: - img (ndarray): Image to be normalized. - mean (ndarray): The mean to be used for normalize. - std (ndarray): The std to be used for normalize. - to_rgb (bool): Whether to convert to rgb. - - Returns: - ndarray: The normalized image. - """ - img = img.copy().astype(np.float32) - return imnormalize_(img, mean, std, to_rgb) - - -def imnormalize_(img, mean, std, to_rgb=True): - """Inplace normalize an image with mean and std. - - Args: - img (ndarray): Image to be normalized. - mean (ndarray): The mean to be used for normalize. - std (ndarray): The std to be used for normalize. - to_rgb (bool): Whether to convert to rgb. - - Returns: - ndarray: The normalized image. - """ - # cv2 inplace normalization does not accept uint8 - assert img.dtype != np.uint8 - mean = np.float64(mean.reshape(1, -1)) - stdinv = 1 / np.float64(std.reshape(1, -1)) - if to_rgb: - cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace - cv2.subtract(img, mean, img) # inplace - cv2.multiply(img, stdinv, img) # inplace - return img - - -def imdenormalize(img, mean, std, to_bgr=True): - assert img.dtype != np.uint8 - mean = mean.reshape(1, -1).astype(np.float64) - std = std.reshape(1, -1).astype(np.float64) - img = cv2.multiply(img, std) # make a copy - cv2.add(img, mean, img) # inplace - if to_bgr: - cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace - return img - - -def iminvert(img): - """Invert (negate) an image. - - Args: - img (ndarray): Image to be inverted. - - Returns: - ndarray: The inverted image. - """ - return np.full_like(img, 255) - img - - -def solarize(img, thr=128): - """Solarize an image (invert all pixel values above a threshold) - - Args: - img (ndarray): Image to be solarized. - thr (int): Threshold for solarizing (0 - 255). - - Returns: - ndarray: The solarized image. - """ - img = np.where(img < thr, img, 255 - img) - return img - - -def posterize(img, bits): - """Posterize an image (reduce the number of bits for each color channel) - - Args: - img (ndarray): Image to be posterized. - bits (int): Number of bits (1 to 8) to use for posterizing. - - Returns: - ndarray: The posterized image. - """ - shift = 8 - bits - img = np.left_shift(np.right_shift(img, shift), shift) - return img - - -def adjust_color(img, alpha=1, beta=None, gamma=0): - r"""It blends the source image and its gray image: - - .. math:: - output = img * alpha + gray\_img * beta + gamma - - Args: - img (ndarray): The input source image. - alpha (int | float): Weight for the source image. Default 1. - beta (int | float): Weight for the converted gray image. - If None, it's assigned the value (1 - `alpha`). - gamma (int | float): Scalar added to each sum. - Same as :func:`cv2.addWeighted`. Default 0. - - Returns: - ndarray: Colored image which has the same size and dtype as input. 
- """ - gray_img = bgr2gray(img) - gray_img = np.tile(gray_img[..., None], [1, 1, 3]) - if beta is None: - beta = 1 - alpha - colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma) - if not colored_img.dtype == np.uint8: - # Note when the dtype of `img` is not the default `np.uint8` - # (e.g. np.float32), the value in `colored_img` got from cv2 - # is not guaranteed to be in range [0, 255], so here clip - # is needed. - colored_img = np.clip(colored_img, 0, 255) - return colored_img - - -def imequalize(img): - """Equalize the image histogram. - - This function applies a non-linear mapping to the input image, - in order to create a uniform distribution of grayscale values - in the output image. - - Args: - img (ndarray): Image to be equalized. - - Returns: - ndarray: The equalized image. - """ - - def _scale_channel(im, c): - """Scale the data in the corresponding channel.""" - im = im[:, :, c] - # Compute the histogram of the image channel. - histo = np.histogram(im, 256, (0, 255))[0] - # For computing the step, filter out the nonzeros. - nonzero_histo = histo[histo > 0] - step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255 - if not step: - lut = np.array(range(256)) - else: - # Compute the cumulative sum, shifted by step // 2 - # and then normalized by step. - lut = (np.cumsum(histo) + (step // 2)) // step - # Shift lut, prepending with 0. - lut = np.concatenate([[0], lut[:-1]], 0) - # handle potential integer overflow - lut[lut > 255] = 255 - # If step is zero, return the original image. - # Otherwise, index from lut. - return np.where(np.equal(step, 0), im, lut[im]) - - # Scales each channel independently and then stacks - # the result. - s1 = _scale_channel(img, 0) - s2 = _scale_channel(img, 1) - s3 = _scale_channel(img, 2) - equalized_img = np.stack([s1, s2, s3], axis=-1) - return equalized_img.astype(img.dtype) - - -def adjust_brightness(img, factor=1.): - """Adjust image brightness. - - This function controls the brightness of an image. An - enhancement factor of 0.0 gives a black image. - A factor of 1.0 gives the original image. This function - blends the source image and the degenerated black image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be brightened. - factor (float): A value controls the enhancement. - Factor 1.0 returns the original image, lower - factors mean less color (brightness, contrast, - etc), and higher values more. Default 1. - - Returns: - ndarray: The brightened image. - """ - degenerated = np.zeros_like(img) - # Note manually convert the dtype to np.float32, to - # achieve as close results as PIL.ImageEnhance.Brightness. - # Set beta=1-factor, and gamma=0 - brightened_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - brightened_img = np.clip(brightened_img, 0, 255) - return brightened_img.astype(img.dtype) - - -def adjust_contrast(img, factor=1.): - """Adjust image contrast. - - This function controls the contrast of an image. An - enhancement factor of 0.0 gives a solid grey - image. A factor of 1.0 gives the original image. It - blends the source image and the degenerated mean image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be contrasted. BGR order. - factor (float): Same as :func:`mmcv.adjust_brightness`. - - Returns: - ndarray: The contrasted image. 
- """ - gray_img = bgr2gray(img) - hist = np.histogram(gray_img, 256, (0, 255))[0] - mean = round(np.sum(gray_img) / np.sum(hist)) - degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype) - degenerated = gray2bgr(degenerated) - contrasted_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - contrasted_img = np.clip(contrasted_img, 0, 255) - return contrasted_img.astype(img.dtype) - - -def auto_contrast(img, cutoff=0): - """Auto adjust image contrast. - - This function maximize (normalize) image contrast by first removing cutoff - percent of the lightest and darkest pixels from the histogram and remapping - the image so that the darkest pixel becomes black (0), and the lightest - becomes white (255). - - Args: - img (ndarray): Image to be contrasted. BGR order. - cutoff (int | float | tuple): The cutoff percent of the lightest and - darkest pixels to be removed. If given as tuple, it shall be - (low, high). Otherwise, the single value will be used for both. - Defaults to 0. - - Returns: - ndarray: The contrasted image. - """ - - def _auto_contrast_channel(im, c, cutoff): - im = im[:, :, c] - # Compute the histogram of the image channel. - histo = np.histogram(im, 256, (0, 255))[0] - # Remove cut-off percent pixels from histo - histo_sum = np.cumsum(histo) - cut_low = histo_sum[-1] * cutoff[0] // 100 - cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100 - histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low - histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0) - - # Compute mapping - low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1] - # If all the values have been cut off, return the origin img - if low >= high: - return im - scale = 255.0 / (high - low) - offset = -low * scale - lut = np.array(range(256)) - lut = lut * scale + offset - lut = np.clip(lut, 0, 255) - return lut[im] - - if isinstance(cutoff, (int, float)): - cutoff = (cutoff, cutoff) - else: - assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \ - f'float or tuple, but got {type(cutoff)} instead.' - # Auto adjusts contrast for each channel independently and then stacks - # the result. - s1 = _auto_contrast_channel(img, 0, cutoff) - s2 = _auto_contrast_channel(img, 1, cutoff) - s3 = _auto_contrast_channel(img, 2, cutoff) - contrasted_img = np.stack([s1, s2, s3], axis=-1) - return contrasted_img.astype(img.dtype) - - -def adjust_sharpness(img, factor=1., kernel=None): - """Adjust image sharpness. - - This function controls the sharpness of an image. An - enhancement factor of 0.0 gives a blurred image. A - factor of 1.0 gives the original image. And a factor - of 2.0 gives a sharpened image. It blends the source - image and the degenerated mean image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be sharpened. BGR order. - factor (float): Same as :func:`mmcv.adjust_brightness`. - kernel (np.ndarray, optional): Filter kernel to be applied on the img - to obtain the degenerated img. Defaults to None. - - Note: - No value sanity check is enforced on the kernel set by users. So with - an inappropriate kernel, the ``adjust_sharpness`` may fail to perform - the function its name indicates but end up performing whatever - transform determined by the kernel. - - Returns: - ndarray: The sharpened image. 
- """ - - if kernel is None: - # adopted from PIL.ImageFilter.SMOOTH - kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13 - assert isinstance(kernel, np.ndarray), \ - f'kernel must be of type np.ndarray, but got {type(kernel)} instead.' - assert kernel.ndim == 2, \ - f'kernel must have a dimension of 2, but got {kernel.ndim} instead.' - - degenerated = cv2.filter2D(img, -1, kernel) - sharpened_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - sharpened_img = np.clip(sharpened_img, 0, 255) - return sharpened_img.astype(img.dtype) - - -def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True): - """AlexNet-style PCA jitter. - - This data augmentation is proposed in `ImageNet Classification with Deep - Convolutional Neural Networks - `_. - - Args: - img (ndarray): Image to be adjusted lighting. BGR order. - eigval (ndarray): the eigenvalue of the convariance matrix of pixel - values, respectively. - eigvec (ndarray): the eigenvector of the convariance matrix of pixel - values, respectively. - alphastd (float): The standard deviation for distribution of alpha. - Defaults to 0.1 - to_rgb (bool): Whether to convert img to rgb. - - Returns: - ndarray: The adjusted image. - """ - assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \ - f'eigval and eigvec should both be of type np.ndarray, got ' \ - f'{type(eigval)} and {type(eigvec)} instead.' - - assert eigval.ndim == 1 and eigvec.ndim == 2 - assert eigvec.shape == (3, eigval.shape[0]) - n_eigval = eigval.shape[0] - assert isinstance(alphastd, float), 'alphastd should be of type float, ' \ - f'got {type(alphastd)} instead.' - - img = img.copy().astype(np.float32) - if to_rgb: - cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace - - alpha = np.random.normal(0, alphastd, n_eigval) - alter = eigvec \ - * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \ - * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval)) - alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape) - img_adjusted = img + alter - return img_adjusted - - -def lut_transform(img, lut_table): - """Transform array by look-up table. - - The function lut_transform fills the output array with values from the - look-up table. Indices of the entries are taken from the input array. - - Args: - img (ndarray): Image to be transformed. - lut_table (ndarray): look-up table of 256 elements; in case of - multi-channel input array, the table should either have a single - channel (in this case the same table is used for all channels) or - the same number of channels as in the input array. - - Returns: - ndarray: The transformed image. - """ - assert isinstance(img, np.ndarray) - assert 0 <= np.min(img) and np.max(img) <= 255 - assert isinstance(lut_table, np.ndarray) - assert lut_table.shape == (256, ) - - return cv2.LUT(np.array(img, dtype=np.uint8), lut_table) - - -def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)): - """Use CLAHE method to process the image. - - See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J]. - Graphics Gems, 1994:474-485.` for more information. - - Args: - img (ndarray): Image to be processed. - clip_limit (float): Threshold for contrast limiting. Default: 40.0. - tile_grid_size (tuple[int]): Size of grid for histogram equalization. - Input image will be divided into equally sized rectangular tiles. - It defines the number of tiles in row and column. Default: (8, 8). - - Returns: - ndarray: The processed image. 
- """ - assert isinstance(img, np.ndarray) - assert img.ndim == 2 - assert isinstance(clip_limit, (float, int)) - assert is_tuple_of(tile_grid_size, int) - assert len(tile_grid_size) == 2 - - clahe = cv2.createCLAHE(clip_limit, tile_grid_size) - return clahe.apply(np.array(img, dtype=np.uint8)) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/utils/parrots_jit.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/utils/parrots_jit.py deleted file mode 100644 index 61873f6dbb9b10ed972c90aa8faa321e3cb3249e..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/utils/parrots_jit.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os - -from .parrots_wrapper import TORCH_VERSION - -parrots_jit_option = os.getenv('PARROTS_JIT_OPTION') - -if TORCH_VERSION == 'parrots' and parrots_jit_option == 'ON': - from parrots.jit import pat as jit -else: - - def jit(func=None, - check_input=None, - full_shape=True, - derivate=False, - coderize=False, - optimize=False): - - def wrapper(func): - - def wrapper_inner(*args, **kargs): - return func(*args, **kargs) - - return wrapper_inner - - if func is None: - return wrapper - else: - return func - - -if TORCH_VERSION == 'parrots': - from parrots.utils.tester import skip_no_elena -else: - - def skip_no_elena(func): - - def wrapper(*args, **kargs): - return func(*args, **kargs) - - return wrapper diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/common.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/common.py deleted file mode 100644 index aa69a6a6546030aee818b195a0fbb399d5b776f6..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/data/common.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import contextlib -import copy -import itertools -import logging -import numpy as np -import pickle -import random -from typing import Callable, Union -import torch -import torch.utils.data as data -from torch.utils.data.sampler import Sampler - -from annotator.oneformer.detectron2.utils.serialize import PicklableWrapper - -__all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"] - -logger = logging.getLogger(__name__) - - -def _shard_iterator_dataloader_worker(iterable): - # Shard the iterable if we're currently inside pytorch dataloader worker. - worker_info = data.get_worker_info() - if worker_info is None or worker_info.num_workers == 1: - # do nothing - yield from iterable - else: - yield from itertools.islice(iterable, worker_info.id, None, worker_info.num_workers) - - -class _MapIterableDataset(data.IterableDataset): - """ - Map a function over elements in an IterableDataset. - - Similar to pytorch's MapIterDataPipe, but support filtering when map_func - returns None. - - This class is not public-facing. Will be called by `MapDataset`. - """ - - def __init__(self, dataset, map_func): - self._dataset = dataset - self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work - - def __len__(self): - return len(self._dataset) - - def __iter__(self): - for x in map(self._map_func, self._dataset): - if x is not None: - yield x - - -class MapDataset(data.Dataset): - """ - Map a function over the elements in a dataset. 
- """ - - def __init__(self, dataset, map_func): - """ - Args: - dataset: a dataset where map function is applied. Can be either - map-style or iterable dataset. When given an iterable dataset, - the returned object will also be an iterable dataset. - map_func: a callable which maps the element in dataset. map_func can - return None to skip the data (e.g. in case of errors). - How None is handled depends on the style of `dataset`. - If `dataset` is map-style, it randomly tries other elements. - If `dataset` is iterable, it skips the data and tries the next. - """ - self._dataset = dataset - self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work - - self._rng = random.Random(42) - self._fallback_candidates = set(range(len(dataset))) - - def __new__(cls, dataset, map_func): - is_iterable = isinstance(dataset, data.IterableDataset) - if is_iterable: - return _MapIterableDataset(dataset, map_func) - else: - return super().__new__(cls) - - def __getnewargs__(self): - return self._dataset, self._map_func - - def __len__(self): - return len(self._dataset) - - def __getitem__(self, idx): - retry_count = 0 - cur_idx = int(idx) - - while True: - data = self._map_func(self._dataset[cur_idx]) - if data is not None: - self._fallback_candidates.add(cur_idx) - return data - - # _map_func fails for this idx, use a random new index from the pool - retry_count += 1 - self._fallback_candidates.discard(cur_idx) - cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0] - - if retry_count >= 3: - logger = logging.getLogger(__name__) - logger.warning( - "Failed to apply `_map_func` for idx: {}, retry count: {}".format( - idx, retry_count - ) - ) - - -class _TorchSerializedList(object): - """ - A list-like object whose items are serialized and stored in a torch tensor. When - launching a process that uses TorchSerializedList with "fork" start method, - the subprocess can read the same buffer without triggering copy-on-access. When - launching a process that uses TorchSerializedList with "spawn/forkserver" start - method, the list will be pickled by a special ForkingPickler registered by PyTorch - that moves data to shared memory. In both cases, this allows parent and child - processes to share RAM for the list data, hence avoids the issue in - https://github.com/pytorch/pytorch/issues/13246. - - See also https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/ - on how it works. 
- """ - - def __init__(self, lst: list): - self._lst = lst - - def _serialize(data): - buffer = pickle.dumps(data, protocol=-1) - return np.frombuffer(buffer, dtype=np.uint8) - - logger.info( - "Serializing {} elements to byte tensors and concatenating them all ...".format( - len(self._lst) - ) - ) - self._lst = [_serialize(x) for x in self._lst] - self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64) - self._addr = torch.from_numpy(np.cumsum(self._addr)) - self._lst = torch.from_numpy(np.concatenate(self._lst)) - logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024**2)) - - def __len__(self): - return len(self._addr) - - def __getitem__(self, idx): - start_addr = 0 if idx == 0 else self._addr[idx - 1].item() - end_addr = self._addr[idx].item() - bytes = memoryview(self._lst[start_addr:end_addr].numpy()) - - # @lint-ignore PYTHONPICKLEISBAD - return pickle.loads(bytes) - - -_DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = _TorchSerializedList - - -@contextlib.contextmanager -def set_default_dataset_from_list_serialize_method(new): - """ - Context manager for using custom serialize function when creating DatasetFromList - """ - - global _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD - orig = _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD - _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = new - yield - _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = orig - - -class DatasetFromList(data.Dataset): - """ - Wrap a list to a torch Dataset. It produces elements of the list as data. - """ - - def __init__( - self, - lst: list, - copy: bool = True, - serialize: Union[bool, Callable] = True, - ): - """ - Args: - lst (list): a list which contains elements to produce. - copy (bool): whether to deepcopy the element when producing it, - so that the result can be modified in place without affecting the - source in the list. - serialize (bool or callable): whether to serialize the stroage to other - backend. If `True`, the default serialize method will be used, if given - a callable, the callable will be used as serialize method. - """ - self._lst = lst - self._copy = copy - if not isinstance(serialize, (bool, Callable)): - raise TypeError(f"Unsupported type for argument `serailzie`: {serialize}") - self._serialize = serialize is not False - - if self._serialize: - serialize_method = ( - serialize - if isinstance(serialize, Callable) - else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD - ) - logger.info(f"Serializing the dataset using: {serialize_method}") - self._lst = serialize_method(self._lst) - - def __len__(self): - return len(self._lst) - - def __getitem__(self, idx): - if self._copy and not self._serialize: - return copy.deepcopy(self._lst[idx]) - else: - return self._lst[idx] - - -class ToIterableDataset(data.IterableDataset): - """ - Convert an old indices-based (also called map-style) dataset - to an iterable-style dataset. - """ - - def __init__(self, dataset: data.Dataset, sampler: Sampler, shard_sampler: bool = True): - """ - Args: - dataset: an old-style dataset with ``__getitem__`` - sampler: a cheap iterable that produces indices to be applied on ``dataset``. - shard_sampler: whether to shard the sampler based on the current pytorch data loader - worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple - workers, it is responsible for sharding its data based on worker id so that workers - don't produce identical data. 
- - Most samplers (like our TrainingSampler) do not shard based on dataloader worker id - and this argument should be set to True. But certain samplers may be already - sharded, in that case this argument should be set to False. - """ - assert not isinstance(dataset, data.IterableDataset), dataset - assert isinstance(sampler, Sampler), sampler - self.dataset = dataset - self.sampler = sampler - self.shard_sampler = shard_sampler - - def __iter__(self): - if not self.shard_sampler: - sampler = self.sampler - else: - # With map-style dataset, `DataLoader(dataset, sampler)` runs the - # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))` - # will run sampler in every of the N worker. So we should only keep 1/N of the ids on - # each worker. The assumption is that sampler is cheap to iterate so it's fine to - # discard ids in workers. - sampler = _shard_iterator_dataloader_worker(self.sampler) - for idx in sampler: - yield self.dataset[idx] - - def __len__(self): - return len(self.sampler) - - -class AspectRatioGroupedDataset(data.IterableDataset): - """ - Batch data that have similar aspect ratio together. - In this implementation, images whose aspect ratio < (or >) 1 will - be batched together. - This improves training speed because the images then need less padding - to form a batch. - - It assumes the underlying dataset produces dicts with "width" and "height" keys. - It will then produce a list of original dicts with length = batch_size, - all with similar aspect ratios. - """ - - def __init__(self, dataset, batch_size): - """ - Args: - dataset: an iterable. Each element must be a dict with keys - "width" and "height", which will be used to batch data. - batch_size (int): - """ - self.dataset = dataset - self.batch_size = batch_size - self._buckets = [[] for _ in range(2)] - # Hard-coded two aspect ratio groups: w > h and w < h. 
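A stripped-down sketch of the two-bucket batching this class performs (hypothetical dict stream; landscape and portrait images accumulate separately and a batch is emitted whenever a bucket fills):

stream = [{"width": w, "height": h} for w, h in [(8, 4), (4, 8), (9, 3), (3, 9)]]
batch_size = 2
buckets = [[], []]

for d in stream:
    bucket = buckets[0 if d["width"] > d["height"] else 1]
    bucket.append(d)
    if len(bucket) == batch_size:
        batch = bucket[:]
        del bucket[:]        # clear before handing out; code after a yield may never run
        print([(b["width"], b["height"]) for b in batch])
# -> [(8, 4), (9, 3)] then [(4, 8), (3, 9)]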
- # Can add support for more aspect ratio groups, but doesn't seem useful - - def __iter__(self): - for d in self.dataset: - w, h = d["width"], d["height"] - bucket_id = 0 if w > h else 1 - bucket = self._buckets[bucket_id] - bucket.append(d) - if len(bucket) == self.batch_size: - data = bucket[:] - # Clear bucket first, because code after yield is not - # guaranteed to execute - del bucket[:] - yield data diff --git a/spaces/coyotte508/test-req/README.md b/spaces/coyotte508/test-req/README.md deleted file mode 100644 index a517f7c4a5dcc2885605eb12040f4cec8cea4e8b..0000000000000000000000000000000000000000 --- a/spaces/coyotte508/test-req/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Test Req -emoji: 📊 -colorFrom: gray -colorTo: blue -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cscan/CodeFormer/CodeFormer/basicsr/archs/arch_util.py b/spaces/cscan/CodeFormer/CodeFormer/basicsr/archs/arch_util.py deleted file mode 100644 index bad45ab34e901c47fb539152fca714a3795b0de2..0000000000000000000000000000000000000000 --- a/spaces/cscan/CodeFormer/CodeFormer/basicsr/archs/arch_util.py +++ /dev/null @@ -1,318 +0,0 @@ -import collections.abc -import math -import torch -import torchvision -import warnings -from distutils.version import LooseVersion -from itertools import repeat -from torch import nn as nn -from torch.nn import functional as F -from torch.nn import init as init -from torch.nn.modules.batchnorm import _BatchNorm - -from basicsr.ops.dcn import ModulatedDeformConvPack, modulated_deform_conv -from basicsr.utils import get_root_logger - - -@torch.no_grad() -def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs): - """Initialize network weights. - - Args: - module_list (list[nn.Module] | nn.Module): Modules to be initialized. - scale (float): Scale initialized weights, especially for residual - blocks. Default: 1. - bias_fill (float): The value to fill bias. Default: 0 - kwargs (dict): Other arguments for initialization function. - """ - if not isinstance(module_list, list): - module_list = [module_list] - for module in module_list: - for m in module.modules(): - if isinstance(m, nn.Conv2d): - init.kaiming_normal_(m.weight, **kwargs) - m.weight.data *= scale - if m.bias is not None: - m.bias.data.fill_(bias_fill) - elif isinstance(m, nn.Linear): - init.kaiming_normal_(m.weight, **kwargs) - m.weight.data *= scale - if m.bias is not None: - m.bias.data.fill_(bias_fill) - elif isinstance(m, _BatchNorm): - init.constant_(m.weight, 1) - if m.bias is not None: - m.bias.data.fill_(bias_fill) - - -def make_layer(basic_block, num_basic_block, **kwarg): - """Make layers by stacking the same blocks. - - Args: - basic_block (nn.module): nn.module class for basic block. - num_basic_block (int): number of blocks. - - Returns: - nn.Sequential: Stacked blocks in nn.Sequential. - """ - layers = [] - for _ in range(num_basic_block): - layers.append(basic_block(**kwarg)) - return nn.Sequential(*layers) - - -class ResidualBlockNoBN(nn.Module): - """Residual block without BN. - - It has a style of: - ---Conv-ReLU-Conv-+- - |________________| - - Args: - num_feat (int): Channel number of intermediate features. - Default: 64. - res_scale (float): Residual scale. Default: 1. - pytorch_init (bool): If set to True, use pytorch default init, - otherwise, use default_init_weights. Default: False. 
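``default_init_weights`` above with a small ``scale`` (the residual block here passes 0.1) is just Kaiming init followed by a multiplicative shrink; a minimal equivalent for one conv layer:

import torch
from torch import nn
from torch.nn import init

conv = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
with torch.no_grad():
    init.kaiming_normal_(conv.weight)
    conv.weight.data *= 0.1      # damp the residual branch at initialization
    conv.bias.data.fill_(0)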
- """ - - def __init__(self, num_feat=64, res_scale=1, pytorch_init=False): - super(ResidualBlockNoBN, self).__init__() - self.res_scale = res_scale - self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True) - self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True) - self.relu = nn.ReLU(inplace=True) - - if not pytorch_init: - default_init_weights([self.conv1, self.conv2], 0.1) - - def forward(self, x): - identity = x - out = self.conv2(self.relu(self.conv1(x))) - return identity + out * self.res_scale - - -class Upsample(nn.Sequential): - """Upsample module. - - Args: - scale (int): Scale factor. Supported scales: 2^n and 3. - num_feat (int): Channel number of intermediate features. - """ - - def __init__(self, scale, num_feat): - m = [] - if (scale & (scale - 1)) == 0: # scale = 2^n - for _ in range(int(math.log(scale, 2))): - m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(2)) - elif scale == 3: - m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(3)) - else: - raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.') - super(Upsample, self).__init__(*m) - - -def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True): - """Warp an image or feature map with optical flow. - - Args: - x (Tensor): Tensor with size (n, c, h, w). - flow (Tensor): Tensor with size (n, h, w, 2), normal value. - interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'. - padding_mode (str): 'zeros' or 'border' or 'reflection'. - Default: 'zeros'. - align_corners (bool): Before pytorch 1.3, the default value is - align_corners=True. After pytorch 1.3, the default value is - align_corners=False. Here, we use the True as default. - - Returns: - Tensor: Warped image or feature map. - """ - assert x.size()[-2:] == flow.size()[1:3] - _, _, h, w = x.size() - # create mesh grid - grid_y, grid_x = torch.meshgrid(torch.arange(0, h).type_as(x), torch.arange(0, w).type_as(x)) - grid = torch.stack((grid_x, grid_y), 2).float() # W(x), H(y), 2 - grid.requires_grad = False - - vgrid = grid + flow - # scale grid to [-1,1] - vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0 - vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0 - vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3) - output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode, align_corners=align_corners) - - # TODO, what if align_corners=False - return output - - -def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False): - """Resize a flow according to ratio or shape. - - Args: - flow (Tensor): Precomputed flow. shape [N, 2, H, W]. - size_type (str): 'ratio' or 'shape'. - sizes (list[int | float]): the ratio for resizing or the final output - shape. - 1) The order of ratio should be [ratio_h, ratio_w]. For - downsampling, the ratio should be smaller than 1.0 (i.e., ratio - < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e., - ratio > 1.0). - 2) The order of output_size should be [out_h, out_w]. - interp_mode (str): The mode of interpolation for resizing. - Default: 'bilinear'. - align_corners (bool): Whether align corners. Default: False. - - Returns: - Tensor: Resized flow. 
- """ - _, _, flow_h, flow_w = flow.size() - if size_type == 'ratio': - output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1]) - elif size_type == 'shape': - output_h, output_w = sizes[0], sizes[1] - else: - raise ValueError(f'Size type should be ratio or shape, but got type {size_type}.') - - input_flow = flow.clone() - ratio_h = output_h / flow_h - ratio_w = output_w / flow_w - input_flow[:, 0, :, :] *= ratio_w - input_flow[:, 1, :, :] *= ratio_h - resized_flow = F.interpolate( - input=input_flow, size=(output_h, output_w), mode=interp_mode, align_corners=align_corners) - return resized_flow - - -# TODO: may write a cpp file -def pixel_unshuffle(x, scale): - """ Pixel unshuffle. - - Args: - x (Tensor): Input feature with shape (b, c, hh, hw). - scale (int): Downsample ratio. - - Returns: - Tensor: the pixel unshuffled feature. - """ - b, c, hh, hw = x.size() - out_channel = c * (scale**2) - assert hh % scale == 0 and hw % scale == 0 - h = hh // scale - w = hw // scale - x_view = x.view(b, c, h, scale, w, scale) - return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w) - - -class DCNv2Pack(ModulatedDeformConvPack): - """Modulated deformable conv for deformable alignment. - - Different from the official DCNv2Pack, which generates offsets and masks - from the preceding features, this DCNv2Pack takes another different - features to generate offsets and masks. - - Ref: - Delving Deep into Deformable Alignment in Video Super-Resolution. - """ - - def forward(self, x, feat): - out = self.conv_offset(feat) - o1, o2, mask = torch.chunk(out, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - - offset_absmean = torch.mean(torch.abs(offset)) - if offset_absmean > 50: - logger = get_root_logger() - logger.warning(f'Offset abs mean is {offset_absmean}, larger than 50.') - - if LooseVersion(torchvision.__version__) >= LooseVersion('0.9.0'): - return torchvision.ops.deform_conv2d(x, offset, self.weight, self.bias, self.stride, self.padding, - self.dilation, mask) - else: - return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding, - self.dilation, self.groups, self.deformable_groups) - - -def _no_grad_trunc_normal_(tensor, mean, std, a, b): - # From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py - # Cut & paste from PyTorch official master until it's in a few official releases - RW - # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1. + math.erf(x / math.sqrt(2.))) / 2. - - if (mean < a - 2 * std) or (mean > b + 2 * std): - warnings.warn( - 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' - 'The distribution of values may be incorrect.', - stacklevel=2) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - low = norm_cdf((a - mean) / std) - up = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [low, up], then translate to - # [2l-1, 2u-1]. 
- tensor.uniform_(2 * low - 1, 2 * up - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. - - From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py - - The values are effectively drawn from the - normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` - with values outside :math:`[a, b]` redrawn until they are within - the bounds. The method used for generating the random values works - best when :math:`a \leq \text{mean} \leq b`. - - Args: - tensor: an n-dimensional `torch.Tensor` - mean: the mean of the normal distribution - std: the standard deviation of the normal distribution - a: the minimum cutoff value - b: the maximum cutoff value - - Examples: - >>> w = torch.empty(3, 5) - >>> nn.init.trunc_normal_(w) - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) - - -# From PyTorch -def _ntuple(n): - - def parse(x): - if isinstance(x, collections.abc.Iterable): - return x - return tuple(repeat(x, n)) - - return parse - - -to_1tuple = _ntuple(1) -to_2tuple = _ntuple(2) -to_3tuple = _ntuple(3) -to_4tuple = _ntuple(4) -to_ntuple = _ntuple \ No newline at end of file diff --git a/spaces/cscan/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/datasets.py b/spaces/cscan/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/datasets.py deleted file mode 100644 index e672b136f56fd6b05038e24377908361a54fe519..0000000000000000000000000000000000000000 --- a/spaces/cscan/CodeFormer/CodeFormer/facelib/detection/yolov5face/utils/datasets.py +++ /dev/null @@ -1,35 +0,0 @@ -import cv2 -import numpy as np - - -def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale_fill=False, scaleup=True): - # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232 - shape = img.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better test mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding - elif scale_fill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return img, ratio, (dw, dh) diff --git a/spaces/csuhan/opendet2/opendet2/engine/defaults.py b/spaces/csuhan/opendet2/opendet2/engine/defaults.py deleted file mode 100644 index 
b7ae381add473c83b8096ee531cae8cb9a404cff..0000000000000000000000000000000000000000 --- a/spaces/csuhan/opendet2/opendet2/engine/defaults.py +++ /dev/null @@ -1,441 +0,0 @@ -import logging -import os -import weakref -from collections import OrderedDict -from typing import Dict - -import torch -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import CfgNode -from detectron2.data import MetadataCatalog -from detectron2.engine import (AMPTrainer, SimpleTrainer, - TrainerBase, create_ddp_model, hooks, create_ddp_model, default_writers) -from detectron2.evaluation import (DatasetEvaluator, DatasetEvaluators, - inference_on_dataset, print_csv_format, - verify_results) -from detectron2.modeling import GeneralizedRCNNWithTTA, build_model -from detectron2.solver import build_lr_scheduler -from detectron2.utils import comm -from detectron2.utils.logger import setup_logger -from fvcore.nn.precise_bn import get_bn_modules - -from ..data import build_detection_test_loader, build_detection_train_loader -from ..evaluation import PascalVOCDetectionEvaluator -from ..solver import build_optimizer - - -class OpenDetTrainer(TrainerBase): - """ - A trainer with default training logic. It does the following: - - 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader - defined by the given config. Create a LR scheduler defined by the config. - 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when - `resume_or_load` is called. - 3. Register a few common hooks defined by the config. - - It is created to simplify the **standard model training workflow** and reduce code boilerplate - for users who only need the standard training workflow, with standard features. - It means this class makes *many assumptions* about your training logic that - may easily become invalid in a new research. In fact, any assumptions beyond those made in the - :class:`SimpleTrainer` are too much for research. - - The code of this class has been annotated about restrictive assumptions it makes. - When they do not work for you, you're encouraged to: - - 1. Overwrite methods of this class, OR: - 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and - nothing else. You can then add your own hooks if needed. OR: - 3. Write your own training loop similar to `tools/plain_train_net.py`. - - See the :doc:`/tutorials/training` tutorials for more details. - - Note that the behavior of this class, like other functions/classes in - this file, is not stable, since it is meant to represent the "common default behavior". - It is only guaranteed to work well with the standard models and training workflow in detectron2. - To obtain more stable behavior, write your own training logic with other public APIs. - - Examples: - :: - trainer = DefaultTrainer(cfg) - trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS - trainer.train() - - Attributes: - scheduler: - checkpointer (DetectionCheckpointer): - cfg (CfgNode): - """ - - def __init__(self, cfg): - """ - Args: - cfg (CfgNode): - """ - super().__init__() - logger = logging.getLogger("detectron2") - # setup_logger is not called for d2 - if not logger.isEnabledFor(logging.INFO): - setup_logger() - cfg = OpenDetTrainer.auto_scale_workers(cfg, comm.get_world_size()) - - # Assume these objects must be constructed in this order. 
- model = self.build_model(cfg) - optimizer = self.build_optimizer(cfg, model) - data_loader = self.build_train_loader(cfg) - - model = create_ddp_model( - model, broadcast_buffers=False, find_unused_parameters=True) - # model = create_ddp_model(model, broadcast_buffers=False) - self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)( - model, data_loader, optimizer - ) - - self.scheduler = self.build_lr_scheduler(cfg, optimizer) - self.checkpointer = DetectionCheckpointer( - # Assume you want to save checkpoints together with logs/statistics - model, - cfg.OUTPUT_DIR, - trainer=weakref.proxy(self), - ) - self.start_iter = 0 - self.max_iter = cfg.SOLVER.MAX_ITER - self.cfg = cfg - - self.register_hooks(self.build_hooks()) - - def resume_or_load(self, resume=True): - """ - If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by - a `last_checkpoint` file), resume from the file. Resuming means loading all - available states (eg. optimizer and scheduler) and update iteration counter - from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used. - - Otherwise, this is considered as an independent training. The method will load model - weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start - from iteration 0. - - Args: - resume (bool): whether to do resume or not - """ - self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume) - if resume and self.checkpointer.has_checkpoint(): - # The checkpoint stores the training iteration that just finished, thus we start - # at the next iteration - self.start_iter = self.iter + 1 - - def build_hooks(self): - """ - Build a list of default hooks, including timing, evaluation, - checkpointing, lr scheduling, precise BN, writing events. - - Returns: - list[HookBase]: - """ - cfg = self.cfg.clone() - cfg.defrost() - cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN - - ret = [ - hooks.IterationTimer(), - hooks.LRScheduler(), - hooks.PreciseBN( - # Run at the same freq as (but before) evaluation. - cfg.TEST.EVAL_PERIOD, - self.model, - # Build a new data loader to not affect training - self.build_train_loader(cfg), - cfg.TEST.PRECISE_BN.NUM_ITER, - ) - if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) - else None, - ] - - # Do PreciseBN before checkpointer, because it updates the model and need to - # be saved by checkpointer. - # This is not always the best: if checkpointing has a different frequency, - # some checkpoints may have more precise statistics than others. - if comm.is_main_process(): - ret.append(hooks.PeriodicCheckpointer( - self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)) - - def test_and_save_results(): - self._last_eval_results = self.test(self.cfg, self.model) - return self._last_eval_results - - # Do evaluation after checkpointer, because then if it fails, - # we can use the saved checkpoint to debug. - ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) - - if comm.is_main_process(): - # Here the default print/log frequency of each writer is used. - # run writers in the end, so that evaluation metrics are written - ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) - return ret - - def build_writers(self): - """ - Build a list of writers to be used using :func:`default_writers()`. - If you'd like a different list of writers, you can overwrite it in - your trainer. - - Returns: - list[EventWriter]: a list of :class:`EventWriter` objects. 
- """ - return default_writers(self.cfg.OUTPUT_DIR, self.max_iter) - - def train(self): - """ - Run training. - - Returns: - OrderedDict of results, if evaluation is enabled. Otherwise None. - """ - super().train(self.start_iter, self.max_iter) - if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process(): - assert hasattr( - self, "_last_eval_results" - ), "No evaluation results obtained during training!" - verify_results(self.cfg, self._last_eval_results) - return self._last_eval_results - - def run_step(self): - self._trainer.iter = self.iter - self._trainer.run_step() - - @classmethod - def build_model(cls, cfg): - """ - Returns: - torch.nn.Module: - - It now calls :func:`detectron2.modeling.build_model`. - Overwrite it if you'd like a different model. - """ - model = build_model(cfg) - logger = logging.getLogger(__name__) - logger.info("Model:\n{}".format(model)) - return model - - @classmethod - def build_optimizer(cls, cfg, model): - """ - Returns: - torch.optim.Optimizer: - - It now calls :func:`detectron2.solver.build_optimizer`. - Overwrite it if you'd like a different optimizer. - """ - return build_optimizer(cfg, model) - - @classmethod - def build_lr_scheduler(cls, cfg, optimizer): - """ - It now calls :func:`detectron2.solver.build_lr_scheduler`. - Overwrite it if you'd like a different scheduler. - """ - return build_lr_scheduler(cfg, optimizer) - - @classmethod - def build_train_loader(cls, cfg): - """ - Returns: - iterable - - It now calls :func:`detectron2.data.build_detection_train_loader`. - Overwrite it if you'd like a different data loader. - """ - return build_detection_train_loader(cfg) - - @classmethod - def build_test_loader(cls, cfg, dataset_name): - """ - Returns: - iterable - - It now calls :func:`detectron2.data.build_detection_test_loader`. - Overwrite it if you'd like a different data loader. - """ - return build_detection_test_loader(cfg, dataset_name) - - @classmethod - def build_evaluator(cls, cfg, dataset_name, output_folder=None): - if output_folder is None: - output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") - evaluator_list = [] - evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type - - if evaluator_type == "pascal_voc": - return PascalVOCDetectionEvaluator(dataset_name, cfg) - - if len(evaluator_list) == 0: - raise NotImplementedError( - "no Evaluator for the dataset {} with the type {}".format( - dataset_name, evaluator_type - ) - ) - elif len(evaluator_list) == 1: - return evaluator_list[0] - return DatasetEvaluators(evaluator_list) - - @classmethod - def test_with_TTA(cls, cfg, model): - logger = logging.getLogger("detectron2.trainer") - # In the end of training, run an evaluation with TTA - # Only support some R-CNN models. - logger.info("Running inference with test-time augmentation ...") - model = GeneralizedRCNNWithTTA(cfg, model) - evaluators = [ - cls.build_evaluator( - cfg, name, output_folder=os.path.join( - cfg.OUTPUT_DIR, "inference_TTA") - ) - for name in cfg.DATASETS.TEST - ] - res = cls.test(cfg, model, evaluators) - res = OrderedDict({k + "_TTA": v for k, v in res.items()}) - return res - - @classmethod - def test(cls, cfg, model, evaluators=None): - """ - Args: - cfg (CfgNode): - model (nn.Module): - evaluators (list[DatasetEvaluator] or None): if None, will call - :meth:`build_evaluator`. Otherwise, must have the same length as - ``cfg.DATASETS.TEST``. 
- - Returns: - dict: a dict of result metrics - """ - logger = logging.getLogger(__name__) - if isinstance(evaluators, DatasetEvaluator): - evaluators = [evaluators] - if evaluators is not None: - assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( - len(cfg.DATASETS.TEST), len(evaluators) - ) - - results = OrderedDict() - for idx, dataset_name in enumerate(cfg.DATASETS.TEST): - data_loader = cls.build_test_loader(cfg, dataset_name) - # When evaluators are passed in as arguments, - # implicitly assume that evaluators can be created before data_loader. - if evaluators is not None: - evaluator = evaluators[idx] - else: - try: - evaluator = cls.build_evaluator(cfg, dataset_name) - except NotImplementedError: - logger.warn( - "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " - "or implement its `build_evaluator` method." - ) - results[dataset_name] = {} - continue - results_i = inference_on_dataset(model, data_loader, evaluator) - results[dataset_name] = results_i - if comm.is_main_process(): - assert isinstance( - results_i, dict - ), "Evaluator must return a dict on the main process. Got {} instead.".format( - results_i - ) - logger.info( - "Evaluation results for {} in csv format:".format(dataset_name)) - print_csv_format(results_i) - - if len(results) == 1: - results = list(results.values())[0] - return results - - @staticmethod - def auto_scale_workers(cfg, num_workers: int): - """ - When the config is defined for certain number of workers (according to - ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of - workers currently in use, returns a new cfg where the total batch size - is scaled so that the per-GPU batch size stays the same as the - original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``. - - Other config options are also scaled accordingly: - * training steps and warmup steps are scaled inverse proportionally. - * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`. - - For example, with the original config like the following: - - .. code-block:: yaml - - IMS_PER_BATCH: 16 - BASE_LR: 0.1 - REFERENCE_WORLD_SIZE: 8 - MAX_ITER: 5000 - STEPS: (4000,) - CHECKPOINT_PERIOD: 1000 - - When this config is used on 16 GPUs instead of the reference number 8, - calling this method will return a new config with: - - .. code-block:: yaml - - IMS_PER_BATCH: 32 - BASE_LR: 0.2 - REFERENCE_WORLD_SIZE: 16 - MAX_ITER: 2500 - STEPS: (2000,) - CHECKPOINT_PERIOD: 500 - - Note that both the original config and this new config can be trained on 16 GPUs. - It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``). - - Returns: - CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``. - """ - old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE - if old_world_size == 0 or old_world_size == num_workers: - return cfg - cfg = cfg.clone() - frozen = cfg.is_frozen() - cfg.defrost() - - assert ( - cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0 - ), "Invalid REFERENCE_WORLD_SIZE in config!" 
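# The scaling below, instantiated on the docstring's 8 -> 16 GPU example:
# scale = 16 / 8 = 2, so IMS_PER_BATCH 16 -> 32, BASE_LR 0.1 -> 0.2,
# MAX_ITER 5000 -> 2500, STEPS (4000,) -> (2000,), CHECKPOINT_PERIOD 1000 -> 500.
scale = 16 / 8
assert int(round(16 * scale)) == 32 and int(round(5000 / scale)) == 2500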
- scale = num_workers / old_world_size - bs = cfg.SOLVER.IMS_PER_BATCH = int( - round(cfg.SOLVER.IMS_PER_BATCH * scale)) - lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale - max_iter = cfg.SOLVER.MAX_ITER = int( - round(cfg.SOLVER.MAX_ITER / scale)) - warmup_iter = cfg.SOLVER.WARMUP_ITERS = int( - round(cfg.SOLVER.WARMUP_ITERS / scale)) - cfg.SOLVER.STEPS = tuple(int(round(s / scale)) - for s in cfg.SOLVER.STEPS) - cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale)) - cfg.SOLVER.CHECKPOINT_PERIOD = int( - round(cfg.SOLVER.CHECKPOINT_PERIOD / scale)) - cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant - logger = logging.getLogger(__name__) - logger.info( - f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, " - f"max_iter={max_iter}, warmup={warmup_iter}." - ) - - if frozen: - cfg.freeze() - return cfg - - -# Access basic attributes from the underlying trainer -for _attr in ["model", "data_loader", "optimizer"]: - setattr( - OpenDetTrainer, - _attr, - property( - # getter - lambda self, x=_attr: getattr(self._trainer, x), - # setter - lambda self, value, x=_attr: setattr(self._trainer, x, value), - ), - ) diff --git a/spaces/cupkake14/bean_vit_classifier/app.py b/spaces/cupkake14/bean_vit_classifier/app.py deleted file mode 100644 index 021beb8126aec0492b19046f3b6915023e1315c8..0000000000000000000000000000000000000000 --- a/spaces/cupkake14/bean_vit_classifier/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio as gr -import datasets -import transformers -import torch - -from transformers import AutoFeatureExtractor, AutoModelForImageClassification - -dataset = datasets.load_dataset('beans') - -extractor = AutoFeatureExtractor.from_pretrained("saved_model_files") -model = AutoModelForImageClassification.from_pretrained("saved_model_files") - -labels = dataset['train'].features['labels'].names - -def classify(im): - features = extractor(im, return_tensors='pt') - with torch.no_grad(): - logits = model(**features).logits - probability = torch.nn.functional.softmax(logits, dim=-1) - probs = probability[0].detach().numpy() - confidences = {label: float(probs[i]) for i, label in enumerate(labels)} - return confidences - - -gr.Interface(fn = classify, - inputs = "image", - outputs = "label", - examples = "examples", - title='Leaf classification on beans dataset', - description='Fine-tuning a ViT for bean plant health classification' - ).launch(debug=True) \ No newline at end of file diff --git a/spaces/dakaiye/dky_xuexi/crazy_functions/test_project/python/dqn/dqn.py b/spaces/dakaiye/dky_xuexi/crazy_functions/test_project/python/dqn/dqn.py deleted file mode 100644 index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000 --- a/spaces/dakaiye/dky_xuexi/crazy_functions/test_project/python/dqn/dqn.py +++ /dev/null @@ -1,245 +0,0 @@ -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -import gym -import numpy as np -import torch as th -from torch.nn import functional as F - -from stable_baselines3.common import logger -from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm -from stable_baselines3.common.preprocessing import maybe_transpose -from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule -from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update -from stable_baselines3.dqn.policies import DQNPolicy - - -class DQN(OffPolicyAlgorithm): - """ - Deep Q-Network (DQN) - - Paper: https://arxiv.org/abs/1312.5602, 
https://www.nature.com/articles/nature14236 - Default hyperparameters are taken from the nature paper, - except for the optimizer and learning rate, which were taken from Stable Baselines defaults. - - :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) - :param env: The environment to learn from (if registered in Gym, can be str) - :param learning_rate: The learning rate; it can be a function - of the current progress remaining (from 1 to 0) - :param buffer_size: size of the replay buffer - :param learning_starts: how many steps of the model to collect transitions for before learning starts - :param batch_size: Minibatch size for each gradient update - :param tau: the soft update coefficient ("Polyak update", between 0 and 1); default 1 for hard update - :param gamma: the discount factor - :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit - like ``(5, "step")`` or ``(2, "episode")``. - :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``). - Setting it to ``-1`` means doing as many gradient steps as steps done in the environment - during the rollout. - :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer - at a cost of more complexity. - See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 - :param target_update_interval: update the target network every ``target_update_interval`` - environment steps. - :param exploration_fraction: fraction of the entire training period over which the exploration rate is reduced - :param exploration_initial_eps: initial value of random action probability - :param exploration_final_eps: final value of random action probability - :param max_grad_norm: The maximum value for the gradient clipping - :param tensorboard_log: the log location for tensorboard (if None, no logging) - :param create_eval_env: Whether to create a second environment that will be - used for evaluating the agent periodically. (Only available when passing string for the environment) - :param policy_kwargs: additional arguments to be passed to the policy on creation - :param verbose: the verbosity level: 0 no output, 1 info, 2 debug - :param seed: Seed for the pseudo random generators - :param device: Device (cpu, cuda, ...) on which the code should be run. - Setting it to auto, the code will be run on the GPU if possible.
- :param _init_setup_model: Whether or not to build the network at the creation of the instance - """ - - def __init__( - self, - policy: Union[str, Type[DQNPolicy]], - env: Union[GymEnv, str], - learning_rate: Union[float, Schedule] = 1e-4, - buffer_size: int = 1000000, - learning_starts: int = 50000, - batch_size: Optional[int] = 32, - tau: float = 1.0, - gamma: float = 0.99, - train_freq: Union[int, Tuple[int, str]] = 4, - gradient_steps: int = 1, - optimize_memory_usage: bool = False, - target_update_interval: int = 10000, - exploration_fraction: float = 0.1, - exploration_initial_eps: float = 1.0, - exploration_final_eps: float = 0.05, - max_grad_norm: float = 10, - tensorboard_log: Optional[str] = None, - create_eval_env: bool = False, - policy_kwargs: Optional[Dict[str, Any]] = None, - verbose: int = 0, - seed: Optional[int] = None, - device: Union[th.device, str] = "auto", - _init_setup_model: bool = True, - ): - - super(DQN, self).__init__( - policy, - env, - DQNPolicy, - learning_rate, - buffer_size, - learning_starts, - batch_size, - tau, - gamma, - train_freq, - gradient_steps, - action_noise=None, # No action noise - policy_kwargs=policy_kwargs, - tensorboard_log=tensorboard_log, - verbose=verbose, - device=device, - create_eval_env=create_eval_env, - seed=seed, - sde_support=False, - optimize_memory_usage=optimize_memory_usage, - supported_action_spaces=(gym.spaces.Discrete,), - ) - - self.exploration_initial_eps = exploration_initial_eps - self.exploration_final_eps = exploration_final_eps - self.exploration_fraction = exploration_fraction - self.target_update_interval = target_update_interval - self.max_grad_norm = max_grad_norm - # "epsilon" for the epsilon-greedy exploration - self.exploration_rate = 0.0 - # Linear schedule will be defined in `_setup_model()` - self.exploration_schedule = None - self.q_net, self.q_net_target = None, None - - if _init_setup_model: - self._setup_model() - - def _setup_model(self) -> None: - super(DQN, self)._setup_model() - self._create_aliases() - self.exploration_schedule = get_linear_fn( - self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction - ) - - def _create_aliases(self) -> None: - self.q_net = self.policy.q_net - self.q_net_target = self.policy.q_net_target - - def _on_step(self) -> None: - """ - Update the exploration rate and target network if needed. - This method is called in ``collect_rollouts()`` after each step in the environment. 
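A sketch of what ``get_linear_fn`` above plausibly returns (assumed semantics, matching the epsilon-greedy usage here: interpolate from the initial to the final value over ``exploration_fraction`` of training, as a function of ``progress_remaining`` going from 1 to 0):

def linear_fn(start, end, end_fraction):
    def f(progress_remaining):
        progress = 1.0 - progress_remaining
        if progress > end_fraction:
            return end
        return start + progress * (end - start) / end_fraction
    return f

eps = linear_fn(1.0, 0.05, 0.1)
eps(1.0), eps(0.95), eps(0.0)   # -> 1.0, 0.525, 0.05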
- """ - if self.num_timesteps % self.target_update_interval == 0: - polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau) - - self.exploration_rate = self.exploration_schedule(self._current_progress_remaining) - logger.record("rollout/exploration rate", self.exploration_rate) - - def train(self, gradient_steps: int, batch_size: int = 100) -> None: - # Update learning rate according to schedule - self._update_learning_rate(self.policy.optimizer) - - losses = [] - for _ in range(gradient_steps): - # Sample replay buffer - replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) - - with th.no_grad(): - # Compute the next Q-values using the target network - next_q_values = self.q_net_target(replay_data.next_observations) - # Follow greedy policy: use the one with the highest value - next_q_values, _ = next_q_values.max(dim=1) - # Avoid potential broadcast issue - next_q_values = next_q_values.reshape(-1, 1) - # 1-step TD target - target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values - - # Get current Q-values estimates - current_q_values = self.q_net(replay_data.observations) - - # Retrieve the q-values for the actions from the replay buffer - current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long()) - - # Compute Huber loss (less sensitive to outliers) - loss = F.smooth_l1_loss(current_q_values, target_q_values) - losses.append(loss.item()) - - # Optimize the policy - self.policy.optimizer.zero_grad() - loss.backward() - # Clip gradient norm - th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm) - self.policy.optimizer.step() - - # Increase update counter - self._n_updates += gradient_steps - - logger.record("train/n_updates", self._n_updates, exclude="tensorboard") - logger.record("train/loss", np.mean(losses)) - - def predict( - self, - observation: np.ndarray, - state: Optional[np.ndarray] = None, - mask: Optional[np.ndarray] = None, - deterministic: bool = False, - ) -> Tuple[np.ndarray, Optional[np.ndarray]]: - """ - Overrides the base_class predict function to include epsilon-greedy exploration. - - :param observation: the input observation - :param state: The last states (can be None, used in recurrent policies) - :param mask: The last masks (can be None, used in recurrent policies) - :param deterministic: Whether or not to return deterministic actions. 
- :return: the model's action and the next state - (used in recurrent policies) - """ - if not deterministic and np.random.rand() < self.exploration_rate: - if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space): - n_batch = observation.shape[0] - action = np.array([self.action_space.sample() for _ in range(n_batch)]) - else: - action = np.array(self.action_space.sample()) - else: - action, state = self.policy.predict(observation, state, mask, deterministic) - return action, state - - def learn( - self, - total_timesteps: int, - callback: MaybeCallback = None, - log_interval: int = 4, - eval_env: Optional[GymEnv] = None, - eval_freq: int = -1, - n_eval_episodes: int = 5, - tb_log_name: str = "DQN", - eval_log_path: Optional[str] = None, - reset_num_timesteps: bool = True, - ) -> OffPolicyAlgorithm: - - return super(DQN, self).learn( - total_timesteps=total_timesteps, - callback=callback, - log_interval=log_interval, - eval_env=eval_env, - eval_freq=eval_freq, - n_eval_episodes=n_eval_episodes, - tb_log_name=tb_log_name, - eval_log_path=eval_log_path, - reset_num_timesteps=reset_num_timesteps, - ) - - def _excluded_save_params(self) -> List[str]: - return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"] - - def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: - state_dicts = ["policy", "policy.optimizer"] - - return state_dicts, [] diff --git a/spaces/danterivers/music-generation-samples/audiocraft/modules/codebooks_patterns.py b/spaces/danterivers/music-generation-samples/audiocraft/modules/codebooks_patterns.py deleted file mode 100644 index c5b35cbea8cff84aa56116dbdd860fc72a913a13..0000000000000000000000000000000000000000 --- a/spaces/danterivers/music-generation-samples/audiocraft/modules/codebooks_patterns.py +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import namedtuple -from dataclasses import dataclass -from functools import lru_cache -import logging -import typing as tp - -from abc import ABC, abstractmethod -import torch - -LayoutCoord = namedtuple('LayoutCoord', ['t', 'q']) # (timestep, codebook index) -PatternLayout = tp.List[tp.List[LayoutCoord]] # Sequence of coordinates -logger = logging.getLogger(__name__) - - -@dataclass -class Pattern: - """Base implementation of a pattern over a sequence with multiple codebooks. - - The codebook pattern consists in a layout, defining for each sequence step - the list of coordinates of each codebook timestep in the resulting interleaved sequence. - The first item of the pattern is always an empty list in order to properly insert a special token - to start with. For convenience, we also keep track of ``n_q`` the number of codebooks used for the pattern - and ``timesteps`` the number of timesteps corresponding to the original sequence. - - The pattern provides convenient methods to build and revert interleaved sequences from it: - ``build_pattern_sequence`` maps a given a dense input tensor of multi-codebook sequence from [B, K, T] - to the interleaved sequence of shape [B, K, S] applying the pattern, with S being the batch size, - K being the number of codebooks, T the number of original timesteps and S the number of sequence steps - for the output sequence. 
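The exploration branch of ``predict`` above, reduced to its core (hypothetical discrete action space):

import numpy as np

n_actions, exploration_rate = 4, 0.05
greedy_action = 2                        # stand-in for the policy's argmax

rng = np.random.default_rng(0)
if rng.random() < exploration_rate:
    action = rng.integers(n_actions)     # explore: uniform random action
else:
    action = greedy_action               # exploit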
The unfilled positions are replaced with a special token and the built sequence - is returned along with a mask indicating valid tokens. - ``revert_pattern_sequence`` maps back an interleaved sequence of shape [B, K, S] to the original alignment - of codebooks across timesteps to an output tensor of shape [B, K, T], using again a special token and a mask - to fill and specify invalid positions if needed. - See the dedicated methods for more details. - """ - # Pattern layout, for each sequence step, we have a list of coordinates - # corresponding to the original codebook timestep and position. - # The first list is always an empty list in order to properly insert - # a special token to start with. - layout: PatternLayout - timesteps: int - n_q: int - - def __post_init__(self): - assert len(self.layout) > 0 - assert self.layout[0] == [] - self._validate_layout() - self._build_reverted_sequence_scatter_indexes = lru_cache(100)(self._build_reverted_sequence_scatter_indexes) - self._build_pattern_sequence_scatter_indexes = lru_cache(100)(self._build_pattern_sequence_scatter_indexes) - logger.info("New pattern, time steps: %d, sequence steps: %d", self.timesteps, len(self.layout)) - - def _validate_layout(self): - """Runs checks on the layout to ensure a valid pattern is defined. - A pattern is considered invalid if: - - Multiple timesteps for a same codebook are defined in the same sequence step - - The timesteps for a given codebook are not in ascending order as we advance in the sequence - (this would mean that we have future timesteps before past timesteps). - """ - q_timesteps = {q: 0 for q in range(self.n_q)} - for s, seq_coords in enumerate(self.layout): - if len(seq_coords) > 0: - qs = set() - for coord in seq_coords: - qs.add(coord.q) - last_q_timestep = q_timesteps[coord.q] - assert coord.t >= last_q_timestep, \ - f"Past timesteps are found in the sequence for codebook = {coord.q} at step {s}" - q_timesteps[coord.q] = coord.t - # each sequence step contains at max 1 coordinate per codebook - assert len(qs) == len(seq_coords), \ - f"Multiple entries for a same codebook are found at step {s}" - - @property - def num_sequence_steps(self): - return len(self.layout) - 1 - - @property - def max_delay(self): - max_t_in_seq_coords = 0 - for seq_coords in self.layout[1:]: - for coords in seq_coords: - max_t_in_seq_coords = max(max_t_in_seq_coords, coords.t + 1) - return max_t_in_seq_coords - self.timesteps - - @property - def valid_layout(self): - valid_step = len(self.layout) - self.max_delay - return self.layout[:valid_step] - - def get_sequence_coords_with_timestep(self, t: int, q: tp.Optional[int] = None): - """Get codebook coordinates in the layout that corresponds to the specified timestep t - and optionally to the codebook q. Coordinates are returned as a tuple with the sequence step - and the actual codebook coordinates. 
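A concrete layout makes the conventions above tangible: an illustrative "delay" construction over ``n_q = 2`` codebooks and ``T = 3`` timesteps, shifting codebook ``q`` by ``q`` sequence steps (a toy example, not one of the library's pattern providers):

from collections import namedtuple

LayoutCoord = namedtuple('LayoutCoord', ['t', 'q'])

n_q, T = 2, 3
layout = [[]]                            # step 0 stays empty for the special token
for s in range(T + n_q - 1):
    layout.append([LayoutCoord(t=s - q, q=q) for q in range(n_q) if 0 <= s - q < T])
# As (t, q) pairs: [[], [(0,0)], [(1,0),(0,1)], [(2,0),(1,1)], [(2,1)]]
# max_delay is 1: codebook 1 lags one sequence step behind real time.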
- """ - assert t <= self.timesteps, "provided timesteps is greater than the pattern's number of timesteps" - if q is not None: - assert q <= self.n_q, "provided number of codebooks is greater than the pattern's number of codebooks" - coords = [] - for s, seq_codes in enumerate(self.layout): - for code in seq_codes: - if code.t == t and (q is None or code.q == q): - coords.append((s, code)) - return coords - - def get_steps_with_timestep(self, t: int, q: tp.Optional[int] = None) -> tp.List[int]: - return [step for step, coords in self.get_sequence_coords_with_timestep(t, q)] - - def get_first_step_with_timesteps(self, t: int, q: tp.Optional[int] = None) -> tp.Optional[int]: - steps_with_timesteps = self.get_steps_with_timestep(t, q) - return steps_with_timesteps[0] if len(steps_with_timesteps) > 0 else None - - def _build_pattern_sequence_scatter_indexes(self, timesteps: int, n_q: int, keep_only_valid_steps: bool, - device: tp.Union[torch.device, str] = 'cpu'): - """Build scatter indexes corresponding to the pattern, up to the provided sequence_steps. - - Args: - timesteps (int): Maximum number of timesteps steps to consider. - keep_only_valid_steps (bool): Restrict the pattern layout to match only valid steps. - device (Union[torch.device, str]): Device for created tensors. - Returns: - indexes (torch.Tensor): Indexes corresponding to the sequence, of shape [K, S]. - mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes, of shape [K, S]. - """ - assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}" - assert timesteps <= self.timesteps, "invalid number of timesteps used to build the sequence from the pattern" - # use the proper layout based on whether we limit ourselves to valid steps only or not, - # note that using the valid_layout will result in a truncated sequence up to the valid steps - ref_layout = self.valid_layout if keep_only_valid_steps else self.layout - # single item indexing being super slow with pytorch vs. numpy, so we use numpy here - indexes = torch.zeros(n_q, len(ref_layout), dtype=torch.long).numpy() - mask = torch.zeros(n_q, len(ref_layout), dtype=torch.bool).numpy() - # fill indexes with last sequence step value that will correspond to our special token - # the last value is n_q * timesteps as we have flattened z and append special token as the last token - # which will correspond to the index: n_q * timesteps - indexes[:] = n_q * timesteps - # iterate over the pattern and fill scattered indexes and mask - for s, sequence_coords in enumerate(ref_layout): - for coords in sequence_coords: - if coords.t < timesteps: - indexes[coords.q, s] = coords.t + coords.q * timesteps - mask[coords.q, s] = 1 - indexes = torch.from_numpy(indexes).to(device) - mask = torch.from_numpy(mask).to(device) - return indexes, mask - - def build_pattern_sequence(self, z: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False): - """Build sequence corresponding to the pattern from the input tensor z. - The sequence is built using up to sequence_steps if specified, and non-pattern - coordinates are filled with the special token. - - Args: - z (torch.Tensor): Input tensor of multi-codebooks sequence, of shape [B, K, T]. - special_token (int): Special token used to fill non-pattern coordinates in the new sequence. - keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps. - Steps that are beyond valid steps will be replaced by the special_token in that case. 
- Returns: - values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, S] with S - corresponding either to the sequence_steps if provided, otherwise to the length of the pattern. - indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, S]. - mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, S]. - """ - B, K, T = z.shape - indexes, mask = self._build_pattern_sequence_scatter_indexes( - T, K, keep_only_valid_steps=keep_only_valid_steps, device=str(z.device) - ) - z = z.view(B, -1) - # we append the special token as the last index of our flattened z tensor - z = torch.cat([z, torch.zeros_like(z[:, :1]) + special_token], dim=1) - values = z[:, indexes.view(-1)] - values = values.view(B, K, indexes.shape[-1]) - return values, indexes, mask - - def _build_reverted_sequence_scatter_indexes(self, sequence_steps: int, n_q: int, - keep_only_valid_steps: bool = False, - is_model_output: bool = False, - device: tp.Union[torch.device, str] = 'cpu'): - """Builds scatter indexes required to retrieve the original multi-codebook sequence - from interleaving pattern. - - Args: - sequence_steps (int): Sequence steps. - n_q (int): Number of codebooks. - keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps. - Steps that are beyond valid steps will be replaced by the special_token in that case. - is_model_output (bool): Whether to keep the sequence item corresponding to initial special token or not. - device (Union[torch.device, str]): Device for created tensors. - Returns: - torch.Tensor: Indexes for reconstructing the output, of shape [K, T]. - mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T]. - """ - ref_layout = self.valid_layout if keep_only_valid_steps else self.layout - # TODO(jade): Do we want to further truncate to only valid timesteps here as well? - timesteps = self.timesteps - assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}" - assert sequence_steps <= len(ref_layout), \ - f"sequence to revert is longer than the defined pattern: {sequence_steps} > {len(ref_layout)}" - - # ensure we take the appropriate indexes to keep the model output from the first special token as well - if is_model_output: - ref_layout = ref_layout[1:] - - # single item indexing being super slow with pytorch vs. numpy, so we use numpy here - indexes = torch.zeros(n_q, timesteps, dtype=torch.long).numpy() - mask = torch.zeros(n_q, timesteps, dtype=torch.bool).numpy() - # fill indexes with last sequence step value that will correspond to our special token - indexes[:] = n_q * sequence_steps - for s, sequence_codes in enumerate(ref_layout): - if s < sequence_steps: - for code in sequence_codes: - if code.t < timesteps: - indexes[code.q, code.t] = s + code.q * sequence_steps - mask[code.q, code.t] = 1 - indexes = torch.from_numpy(indexes).to(device) - mask = torch.from_numpy(mask).to(device) - return indexes, mask - - def revert_pattern_sequence(self, s: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False): - """Revert a sequence built from the pattern back to the original multi-codebook sequence without interleaving. - The sequence is reverted using up to timesteps if specified, and non-pattern coordinates - are filled with the special token. - - Args: - s (torch.Tensor): Interleaved sequence tensor obtained from the pattern, of shape [B, K, S]. 
- special_token (int or float): Special token used to fill non-pattern coordinates in the new sequence. - Returns: - values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, T] with T - corresponding either to the timesteps if provided, or the total timesteps in pattern otherwise. - indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, T]. - mask (torch.Tensor): Mask corresponding to indexes that match valid indexes, of shape [K, T]. - """ - B, K, S = s.shape - indexes, mask = self._build_reverted_sequence_scatter_indexes( - S, K, keep_only_valid_steps, is_model_output=False, device=str(s.device) - ) - s = s.view(B, -1) - # we append the special token as the last index of our flattened z tensor - s = torch.cat([s, torch.zeros_like(s[:, :1]) + special_token], dim=1) - values = s[:, indexes.view(-1)] - values = values.view(B, K, indexes.shape[-1]) - return values, indexes, mask - - def revert_pattern_logits(self, logits: torch.Tensor, special_token: float, keep_only_valid_steps: bool = False): - """Revert model logits obtained on a sequence built from the pattern - back to a tensor matching the original sequence. - - This method is similar to ``revert_pattern_sequence`` with the following specificities: - 1. It is designed to work with the extra cardinality dimension. - 2. We return the logits for the first sequence item that matches the special_token and - whose matching target in the original sequence is the first item of the sequence, - while we skip the last logits as there is no matching target. - """ - B, card, K, S = logits.shape - indexes, mask = self._build_reverted_sequence_scatter_indexes( - S, K, keep_only_valid_steps, is_model_output=True, device=logits.device - ) - logits = logits.reshape(B, card, -1) - # we append the special token as the last index of our flattened z tensor - logits = torch.cat([logits, torch.zeros_like(logits[:, :, :1]) + special_token], dim=-1) # [B, card, K x S] - values = logits[:, :, indexes.view(-1)] - values = values.view(B, card, K, indexes.shape[-1]) - return values, indexes, mask - - -class CodebooksPatternProvider(ABC): - """Abstraction around providing pattern for interleaving codebooks. - - The CodebooksPatternProvider abstraction allows implementing various strategies to - define the interleaving pattern of sequences composed of multiple codebooks. For a given - number of codebooks `n_q`, the pattern provider can generate a specified pattern - corresponding to a sequence of `T` timesteps with `n_q` parallel codebooks. This pattern - can be used to construct a new sequence from the original codes respecting the specified - pattern. The pattern is defined as a list of lists of code coordinates, each coordinate - being a tuple with the original timestep and codebook to build the new sequence. - Note that all patterns must start with an empty list that is then used to insert a first - sequence step of special tokens in the newly generated sequence. - - Args: - n_q (int): number of codebooks. - cached (bool): if True, patterns for a given length are cached. In general - that should be true for efficiency reasons to avoid synchronization points. - """ - def __init__(self, n_q: int, cached: bool = True): - assert n_q > 0 - self.n_q = n_q - self.get_pattern = lru_cache(100)(self.get_pattern) # type: ignore - - @abstractmethod - def get_pattern(self, timesteps: int) -> Pattern: - """Builds pattern with specific interleaving between codebooks. - - Args: - timesteps (int): Total number of timesteps.
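As an illustration of the build/revert API documented above, a hedged round-trip sketch using the DelayedPatternProvider defined just below (assumes the module is importable under its upstream path and that 2048 is a free token id; both are assumptions, not part of the original file):

import torch
from audiocraft.modules.codebooks_patterns import DelayedPatternProvider

B, K, T = 1, 3, 4
z = torch.arange(B * K * T).view(B, K, T)        # toy codebook indices
pattern = DelayedPatternProvider(n_q=K).get_pattern(T)
values, _, _ = pattern.build_pattern_sequence(z, special_token=2048)
print(values.shape)                              # torch.Size([1, 3, 7]): S = 1 + T + max_delay
restored, _, mask = pattern.revert_pattern_sequence(values, special_token=2048)
assert torch.equal(restored, z)                  # the delayed pattern covers every timestep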
- """ - raise NotImplementedError() - - -class DelayedPatternProvider(CodebooksPatternProvider): - """Provider for delayed pattern across delayed codebooks. - Codebooks are delayed in the sequence and sequence steps will contain codebooks - from different timesteps. - - Example: - Taking timesteps=4 and n_q=3, delays=None, the multi-codebook sequence: - [[1, 2, 3, 4], - [1, 2, 3, 4], - [1, 2, 3, 4]] - The resulting sequence obtained from the returned pattern is: - [[S, 1, 2, 3, 4], - [S, S, 1, 2, 3], - [S, S, S, 1, 2]] - (with S being a special token) - - Args: - n_q (int): Number of codebooks. - delays (Optional[List[int]]): Delay for each of the codebooks. - If delays not defined, each codebook is delayed by 1 compared to the previous one. - flatten_first (int): Flatten the first N timesteps. - empty_initial (int): Prepend with N empty list of coordinates. - """ - def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None, - flatten_first: int = 0, empty_initial: int = 0): - super().__init__(n_q) - if delays is None: - delays = list(range(n_q)) - self.delays = delays - self.flatten_first = flatten_first - self.empty_initial = empty_initial - assert len(self.delays) == self.n_q - assert sorted(self.delays) == self.delays - - def get_pattern(self, timesteps: int) -> Pattern: - out: PatternLayout = [[]] - max_delay = max(self.delays) - if self.empty_initial: - out += [[] for _ in range(self.empty_initial)] - if self.flatten_first: - for t in range(min(timesteps, self.flatten_first)): - for q in range(self.n_q): - out.append([LayoutCoord(t, q)]) - for t in range(self.flatten_first, timesteps + max_delay): - v = [] - for q, delay in enumerate(self.delays): - t_for_q = t - delay - if t_for_q >= self.flatten_first: - v.append(LayoutCoord(t_for_q, q)) - out.append(v) - return Pattern(out, n_q=self.n_q, timesteps=timesteps) - - -class ParallelPatternProvider(DelayedPatternProvider): - """Provider for parallel pattern across codebooks. - This pattern provider is a special case of the delayed pattern with actually no delay, - hence delays=repeat(0, n_q). - - Args: - n_q (int): Number of codebooks. - """ - def __init__(self, n_q: int): - super().__init__(n_q, [0] * n_q) - - -class UnrolledPatternProvider(CodebooksPatternProvider): - """Provider for unrolling codebooks pattern. - This pattern provider enables to represent the codebook flattened completely or only to some extend - while also specifying a given delay between the flattened codebooks representation, allowing to - unroll the codebooks in the sequence. - - Example: - 1. Flattening of the codebooks. - By default, the pattern provider will fully flatten the codebooks such as flattening=range(n_q), - taking n_q = 3 and timesteps = 4: - [[1, 2, 3, 4], - [1, 2, 3, 4], - [1, 2, 3, 4]] - will result into: - [[S, S, 1, S, S, 2, S, S, 3, S, S, 4], - [S, 1, S, S, 2, S, S, 3, S, S, 4, S], - [1, S, S, 2, S, S, 3, S, S, 4, S, S]] - 2. Partial flattening of the codebooks. The ``flattening`` parameter allows to specify the inner step - for each of the codebook, allowing to define which codebook to flatten (or keep in parallel), for example - taking n_q = 3, timesteps = 4 and flattening = [0, 1, 1]: - [[1, 2, 3, 4], - [1, 2, 3, 4], - [1, 2, 3, 4]] - will result into: - [[S, 1, S, S, 2, S, S, 3, S, S, 4, S], - [S, 1, S, S, 2, S, S, 3, S, S, 4, S], - [1, S, S, 2, S, S, 3, S, S, 4, S, S]] - 3. Flattening with delay. The ``delay`` parameter allows to further unroll the sequence of codebooks - allowing to specify the delay per codebook. 
Note that the delay between codebooks flattened to the - same inner timestep should be coherent. For example, taking n_q = 3, timesteps = 4, flattening = [0, 1, 1] - and delays = [0, 3, 3]: - [[1, 2, 3, 4], - [1, 2, 3, 4], - [1, 2, 3, 4]] - will result in: - [[S, S, S, 1, S, 2, S, 3, S, 4], - [S, S, S, 1, S, 2, S, 3, S, 4], - [1, 2, 3, S, 4, S, 5, S, 6, S]] - - Args: - n_q (int): Number of codebooks. - flattening (Optional[List[int]]): Flattening schema over the codebooks. If not defined, - the codebooks will be flattened to 1 codebook per step, meaning that the sequence will - have n_q extra steps for each timestep. - delays (Optional[List[int]]): Delay for each of the codebooks. If not defined, - no delay is added and therefore will default to [0] * ``n_q``. - Note that two codebooks that will be flattened to the same inner step - should have the same delay, otherwise the pattern is considered as invalid. - """ - FlattenedCodebook = namedtuple('FlattenedCodebook', ['codebooks', 'delay']) - - def __init__(self, n_q: int, flattening: tp.Optional[tp.List[int]] = None, - delays: tp.Optional[tp.List[int]] = None): - super().__init__(n_q) - if flattening is None: - flattening = list(range(n_q)) - if delays is None: - delays = [0] * n_q - assert len(flattening) == n_q - assert len(delays) == n_q - assert sorted(flattening) == flattening - assert sorted(delays) == delays - self._flattened_codebooks = self._build_flattened_codebooks(delays, flattening) - self.max_delay = max(delays) - - def _build_flattened_codebooks(self, delays: tp.List[int], flattening: tp.List[int]): - """Build a flattened codebooks representation as a dictionary of inner step - and the actual codebook indices corresponding to the flattened codebook. For convenience, we - also store the delay associated with the flattened codebook to avoid maintaining an extra mapping. - """ - flattened_codebooks: dict = {} - for q, (inner_step, delay) in enumerate(zip(flattening, delays)): - if inner_step not in flattened_codebooks: - flat_codebook = UnrolledPatternProvider.FlattenedCodebook(codebooks=[q], delay=delay) - else: - flat_codebook = flattened_codebooks[inner_step] - assert flat_codebook.delay == delay, ( - "Delay and flattening between codebooks are inconsistent: ", - "two codebooks flattened to the same position should have the same delay." - ) - flat_codebook.codebooks.append(q) - flattened_codebooks[inner_step] = flat_codebook - return flattened_codebooks - - @property - def _num_inner_steps(self): - """Number of inner steps to unroll between timesteps in order to flatten the codebooks. - """ - return max([inner_step for inner_step in self._flattened_codebooks.keys()]) + 1 - - def num_virtual_steps(self, timesteps: int) -> int: - return timesteps * self._num_inner_steps + 1 - - def get_pattern(self, timesteps: int) -> Pattern: - """Builds pattern for delay across codebooks. - - Args: - timesteps (int): Total number of timesteps.
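For illustration, a small hedged sketch of the virtual-step accounting above, run against the implementation that follows (assumes the class is importable; the numbers match flattening = [0, 1, 1] and delays = [0, 3, 3] from the docstring example):

# Hedged sketch, not part of the original file.
provider = UnrolledPatternProvider(n_q=3, flattening=[0, 1, 1], delays=[0, 3, 3])
print(provider._num_inner_steps)                # 2 distinct inner steps per timestep
print(provider.num_virtual_steps(timesteps=4))  # 9 == 4 * 2 + 1 (leading special step)
pattern = provider.get_pattern(timesteps=4)
print(len(pattern.layout))                      # 12 sequence steps, including the delayed tail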
- """ - # the PatternLayout is built as a tuple of sequence position and list of coordinates - # so that it can be reordered properly given the required delay between codebooks of given timesteps - indexed_out: list = [(-1, [])] - max_timesteps = timesteps + self.max_delay - for t in range(max_timesteps): - # for each timestep, we unroll the flattened codebooks, - # emitting the sequence step with the corresponding delay - for step in range(self._num_inner_steps): - if step in self._flattened_codebooks: - # we have codebooks at this virtual step to emit - step_codebooks = self._flattened_codebooks[step] - t_for_q = t + step_codebooks.delay - coords = [LayoutCoord(t, q) for q in step_codebooks.codebooks] - if t_for_q < max_timesteps and t < max_timesteps: - indexed_out.append((t_for_q, coords)) - else: - # there is no codebook in this virtual step so we emit an empty list - indexed_out.append((t, [])) - out = [coords for _, coords in sorted(indexed_out)] - return Pattern(out, n_q=self.n_q, timesteps=timesteps) - - -class VALLEPattern(CodebooksPatternProvider): - """Almost VALL-E style pattern. We futher allow some delays for the - codebooks other than the first one. - - Args: - n_q (int): Number of codebooks. - delays (Optional[List[int]]): Delay for each of the codebooks. - If delays not defined, each codebook is delayed by 1 compared to the previous one. - """ - def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None): - super().__init__(n_q) - if delays is None: - delays = [0] * (n_q - 1) - self.delays = delays - assert len(self.delays) == self.n_q - 1 - assert sorted(self.delays) == self.delays - - def get_pattern(self, timesteps: int) -> Pattern: - out: PatternLayout = [[]] - for t in range(timesteps): - out.append([LayoutCoord(t, 0)]) - max_delay = max(self.delays) - for t in range(timesteps + max_delay): - v = [] - for q, delay in enumerate(self.delays): - t_for_q = t - delay - if t_for_q >= 0: - v.append(LayoutCoord(t_for_q, q + 1)) - out.append(v) - return Pattern(out, n_q=self.n_q, timesteps=timesteps) - - -class MusicLMPattern(CodebooksPatternProvider): - """Almost MusicLM style pattern. This is equivalent to full flattening - but in a different order. - - Args: - n_q (int): Number of codebooks. - group_by (int): Number of codebooks to group together. - """ - def __init__(self, n_q: int, group_by: int = 2): - super().__init__(n_q) - self.group_by = group_by - - def get_pattern(self, timesteps: int) -> Pattern: - out: PatternLayout = [[]] - for offset in range(0, self.n_q, self.group_by): - for t in range(timesteps): - for q in range(offset, offset + self.group_by): - out.append([LayoutCoord(t, q)]) - return Pattern(out, n_q=self.n_q, timesteps=timesteps) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/__init__.py deleted file mode 100644 index 9e7511148df475d077a1c0af4d8edf71a17a6c7b..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/aiofiles/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Utilities for asyncio-friendly file handling.""" -from .threadpool import ( - open, - stdin, - stdout, - stderr, - stdin_bytes, - stdout_bytes, - stderr_bytes, -) -from . 
import tempfile - -__all__ = [ - "open", - "tempfile", - "stdin", - "stdout", - "stderr", - "stdin_bytes", - "stdout_bytes", - "stderr_bytes", -] diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/implementations/reference.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/implementations/reference.py deleted file mode 100644 index c960a59f9bcdb3fc13605e044866fc3d263de959..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/implementations/reference.py +++ /dev/null @@ -1,1080 +0,0 @@ -import base64 -import collections -import io -import itertools -import logging -import math -import os -from functools import lru_cache -from typing import TYPE_CHECKING - -import fsspec.core - -try: - import ujson as json -except ImportError: - if not TYPE_CHECKING: - import json - -from ..asyn import AsyncFileSystem -from ..callbacks import _DEFAULT_CALLBACK -from ..core import filesystem, open, split_protocol -from ..utils import isfilelike, merge_offset_ranges, other_paths - -logger = logging.getLogger("fsspec.reference") - - -class ReferenceNotReachable(RuntimeError): - def __init__(self, reference, target, *args): - super().__init__(*args) - self.reference = reference - self.target = target - - def __str__(self): - return f'Reference "{self.reference}" failed to fetch target {self.target}' - - -def _first(d): - return list(d.values())[0] - - -def _prot_in_references(path, references): - ref = references.get(path) - if isinstance(ref, (list, tuple)): - return split_protocol(ref[0])[0] if ref[0] else ref[0] - - -def _protocol_groups(paths, references): - if isinstance(paths, str): - return {_prot_in_references(paths, references): [paths]} - out = {} - for path in paths: - protocol = _prot_in_references(path, references) - out.setdefault(protocol, []).append(path) - return out - - -class RefsValuesView(collections.abc.ValuesView): - def __iter__(self): - for val in self._mapping.zmetadata.values(): - yield json.dumps(val).encode() - yield from self._mapping._items.values() - for field in self._mapping.listdir(): - chunk_sizes = self._mapping._get_chunk_sizes(field) - if len(chunk_sizes) == 0: - yield self._mapping[field + "/0"] - continue - yield from self._mapping._generate_all_records(field) - - -class RefsItemsView(collections.abc.ItemsView): - def __iter__(self): - return zip(self._mapping.keys(), self._mapping.values()) - - -def ravel_multi_index(idx, sizes): - val = 0 - mult = 1 - for i, s in zip(idx[::-1], sizes[::-1]): - val += i * mult - mult *= s - return val - - -class LazyReferenceMapper(collections.abc.MutableMapping): - """Interface to read parquet store as if it were a standard kerchunk - references dict.""" - - # import is class level to prevent numpy dep requirement for fsspec - @property - def np(self): - import numpy as np - - return np - - @property - def pd(self): - import pandas as pd - - return pd - - def __init__( - self, root, fs=None, out_root=None, cache_size=128, categorical_threshold=10 - ): - """ - Parameters - ---------- - root : str - Root of parquet store - fs : fsspec.AbstractFileSystem - fsspec filesystem object, default is local filesystem. - cache_size : int - Maximum size of LRU cache, where cache_size*record_size denotes - the total number of references that can be loaded in memory at once. 
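A hedged sketch of bootstrapping such a parquet-backed reference store via the create() helper defined below; the path and record size are illustrative assumptions, not from the original file, and the import path shown is the upstream fsspec one:

import fsspec
from fsspec.implementations.reference import LazyReferenceMapper

fs = fsspec.filesystem("file")
refs = LazyReferenceMapper.create(record_size=10_000, root="/tmp/refs", fs=fs)
# create() seeds /tmp/refs/.zmetadata with {"metadata": {}, "record_size": 10000},
# which __init__ immediately reads back.
refs[".zgroup"] = b'{"zarr_format": 2}'  # metadata keys are kept in memory
refs.flush()                             # persists modified keys back to .zmetadata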
- """ - self.root = root - self.chunk_sizes = {} - self._items = {} - self.dirs = None - self.fs = fsspec.filesystem("file") if fs is None else fs - with self.fs.open("/".join([self.root, ".zmetadata"]), "rb") as f: - self._items[".zmetadata"] = f.read() - met = json.loads(self._items[".zmetadata"]) - self.record_size = met["record_size"] - self.zmetadata = met["metadata"] - self.url = self.root + "/{field}/refs.{record}.parq" - self.out_root = out_root or self.root - self.cat_thresh = categorical_threshold - - # Define function to open and decompress refs - @lru_cache(maxsize=cache_size) - def open_refs(field, record): - """cached parquet file loader""" - path = self.url.format(field=field, record=record) - with self.fs.open(path) as f: - # TODO: since all we do is iterate, is arrow without pandas - # better here? - df = self.pd.read_parquet(f, engine="fastparquet") - refs = {c: df[c].values for c in df.columns} - return refs - - self.open_refs = open_refs - - @staticmethod - def create(record_size, root, fs, **kwargs): - met = {"metadata": {}, "record_size": record_size} - fs.pipe("/".join([root, ".zmetadata"]), json.dumps(met).encode()) - return LazyReferenceMapper(root, fs, **kwargs) - - def listdir(self, basename=True): - """List top-level directories""" - if self.dirs is None: - dirs = [p.split("/", 1)[0] for p in self.zmetadata] - self.dirs = set(sorted(p for p in dirs if p and not p.startswith("."))) - listing = self.dirs - if basename: - listing = [os.path.basename(path) for path in listing] - return listing - - def ls(self, path="", detail=True): - """Shortcut file listings""" - if not path: - dirnames = self.listdir() - others = set( - [".zmetadata"] - + [name for name in self.zmetadata if "/" not in name] - + [name for name in self._items if "/" not in name] - ) - if detail is False: - others.update(dirnames) - return sorted(others) - dirinfo = [ - {"name": name, "type": "directory", "size": 0} for name in dirnames - ] - fileinfo = [ - { - "name": name, - "type": "file", - "size": len( - json.dumps(self.zmetadata[name]) - if name in self.zmetadata - else self._items[name] - ), - } - for name in others - ] - return sorted(dirinfo + fileinfo, key=lambda s: s["name"]) - parts = path.split("/", 1) - if len(parts) > 1: - raise FileNotFoundError("Cannot list within directories right now") - field = parts[0] - others = set( - [name for name in self.zmetadata if name.startswith(f"{path}/")] - + [name for name in self._items if name.startswith(f"{path}/")] - ) - fileinfo = [ - { - "name": name, - "type": "file", - "size": len( - json.dumps(self.zmetadata[name]) - if name in self.zmetadata - else self._items[name] - ), - } - for name in others - ] - keys = self._keys_in_field(field) - - if detail is False: - return list(others) + list(keys) - recs = self._generate_all_records(field) - recinfo = [ - {"name": name, "type": "file", "size": rec[-1]} - for name, rec in zip(keys, recs) - if rec[0] # filters out path==None, deleted/missing - ] - return fileinfo + recinfo - - def _load_one_key(self, key): - """Get the reference for one key - - Returns bytes, one-element list or three-element list. 
- """ - if key in self._items: - return self._items[key] - elif key in self.zmetadata: - return json.dumps(self.zmetadata[key]).encode() - elif "/" not in key or self._is_meta(key): - raise KeyError(key) - field, sub_key = key.split("/") - record, _, _ = self._key_to_record(key) - maybe = self._items.get((field, key), {}).get(sub_key, False) - if maybe is None: - # explicitly deleted - raise KeyError - elif maybe: - return maybe - - # Chunk keys can be loaded from row group and cached in LRU cache - try: - record, ri, chunk_size = self._key_to_record(key) - if chunk_size == 0: - return b"" - refs = self.open_refs(field, record) - except (ValueError, TypeError, FileNotFoundError): - raise KeyError(key) - columns = ["path", "offset", "size", "raw"] - selection = [refs[c][ri] if c in refs else None for c in columns] - raw = selection[-1] - if raw is not None: - return raw - if selection[0] is None: - raise KeyError("This reference has been deleted") - if selection[1:3] == [0, 0]: - # URL only - return selection[:1] - # URL, offset, size - return selection[:3] - - @lru_cache(4096) - def _key_to_record(self, key): - """Details needed to construct a reference for one key""" - field, chunk = key.split("/") - chunk_sizes = self._get_chunk_sizes(field) - if len(chunk_sizes) == 0: - return 0, 0, 0 - chunk_idx = [int(c) for c in chunk.split(".")] - chunk_number = ravel_multi_index(chunk_idx, chunk_sizes) - record = chunk_number // self.record_size - ri = chunk_number % self.record_size - return record, ri, len(chunk_sizes) - - def _get_chunk_sizes(self, field): - """The number of chunks along each axis for a given field""" - if field not in self.chunk_sizes: - zarray = self.zmetadata[f"{field}/.zarray"] - size_ratio = [ - math.ceil(s / c) for s, c in zip(zarray["shape"], zarray["chunks"]) - ] - self.chunk_sizes[field] = size_ratio - return self.chunk_sizes[field] - - def _generate_record(self, field, record): - """The references for a given parquet file of a given field""" - refs = self.open_refs(field, record) - it = iter(zip(refs.values())) - if len(refs) == 3: - # All urls - return (list(t) for t in it) - elif len(refs) == 1: - # All raws - return refs["raw"] - else: - # Mix of urls and raws - return (list(t[:3]) if not t[3] else t[3] for t in it) - - def _generate_all_records(self, field): - """Load all the references within a field by iterating over the parquet files""" - nrec = 1 - for ch in self._get_chunk_sizes(field): - nrec *= ch - nrec = math.ceil(nrec / self.record_size) - for record in range(nrec): - yield from self._generate_record(field, record) - - def values(self): - return RefsValuesView(self) - - def items(self): - return RefsItemsView(self) - - def __hash__(self): - return id(self) - - @lru_cache(20) - def __getitem__(self, key): - return self._load_one_key(key) - - def __setitem__(self, key, value): - if "/" in key and not self._is_meta(key): - field, chunk = key.split("/") - record, i, _ = self._key_to_record(key) - subdict = self._items.setdefault((field, record), {}) - subdict[i] = value - if len(subdict) == self.record_size: - self.write(field, record) - else: - # metadata or top-level - self._items[key] = value - self.zmetadata[key] = json.loads( - value.decode() if isinstance(value, bytes) else value - ) - - @staticmethod - def _is_meta(key): - return key.startswith(".z") or "/.z" in key - - def __delitem__(self, key): - if key in self._items: - del self._items[key] - elif key in self.zmetadata: - del self.zmetadata[key] - else: - if "/" in key and not self._is_meta(key): - 
field, chunk = key.split("/") - record, _, _ = self._key_to_record(key) - subdict = self._items.setdefault((field, record), {}) - subdict[chunk] = None - if len(subdict) == self.record_size: - self.write(field, record) - else: - # metadata or top-level - self._items[key] = None - - def write(self, field, record, base_url=None, storage_options=None): - # extra requirements if writing - import kerchunk.df - import numpy as np - import pandas as pd - - # TODO: if the dict is incomplete, also load records and merge in - partition = self._items[(field, record)] - fn = f"{base_url or self.out_root}/{field}/refs.{record}.parq" - - #### - paths = np.full(self.record_size, np.nan, dtype="O") - offsets = np.zeros(self.record_size, dtype="int64") - sizes = np.zeros(self.record_size, dtype="int64") - raws = np.full(self.record_size, np.nan, dtype="O") - nraw = 0 - npath = 0 - for j, data in partition.items(): - if isinstance(data, list): - npath += 1 - paths[j] = data[0] - if len(data) > 1: - offsets[j] = data[1] - sizes[j] = data[2] - else: - nraw += 1 - raws[j] = kerchunk.df._proc_raw(data) - # TODO: only save needed columns - df = pd.DataFrame( - dict( - path=paths, - offset=offsets, - size=sizes, - raw=raws, - ), - copy=False, - ) - if df.path.count() / (df.path.nunique() or 1) > self.cat_thresh: - df["path"] = df["path"].astype("category") - object_encoding = dict(raw="bytes", path="utf8") - has_nulls = ["path", "raw"] - - self.fs.mkdirs(f"{base_url or self.out_root}/{field}", exist_ok=True) - df.to_parquet( - fn, - engine="fastparquet", - storage_options=storage_options - or getattr(self.fs, "storage_options", None), - compression="zstd", - index=False, - stats=False, - object_encoding=object_encoding, - has_nulls=has_nulls, - # **kwargs, - ) - partition.clear() - self._items.pop((field, record)) - - def flush(self, base_url=None, storage_options=None): - """Output any modified or deleted keys - - Parameters - ---------- - base_url: str - Location of the output - """ - # write what we have so far and clear sub chunks - for thing in list(self._items): - if isinstance(thing, tuple): - field, record = thing - self.write( - field, - record, - base_url=base_url, - storage_options=storage_options, - ) - - # gather .zmetadata from self._items and write that too - for k in list(self._items): - if k != ".zmetadata" and ".z" in k: - self.zmetadata[k] = json.loads(self._items.pop(k)) - met = {"metadata": self.zmetadata, "record_size": self.record_size} - self._items[".zmetadata"] = json.dumps(met).encode() - self.fs.pipe( - "/".join([base_url or self.out_root, ".zmetadata"]), - self._items[".zmetadata"], - ) - - # TODO: only clear those that we wrote to? - self.open_refs.cache_clear() - - def __len__(self): - # Caveat: This counts expected references, not actual - count = 0 - for field in self.listdir(): - if field.startswith("."): - count += 1 - else: - chunk_sizes = self._get_chunk_sizes(field) - nchunks = self.np.product(chunk_sizes) - count += nchunks - count += len(self.zmetadata) # all metadata keys - count += len(self._items) # the metadata file itself - return count - - def __iter__(self): - # Caveat: Note that this generates all expected keys, but does not - # account for reference keys that are missing. 
- metas = set(self.zmetadata) - metas.update(self._items) - for bit in metas: - if isinstance(bit, str): - yield bit - for field in self.listdir(): - yield from self._keys_in_field(field) - - def __contains__(self, item): - try: - self._load_one_key(item) - return True - except KeyError: - return False - - def _keys_in_field(self, field): - """List key names in given field - - Produces strings like "field/x.y" appropriate from the chunking of the array - """ - chunk_sizes = self._get_chunk_sizes(field) - if len(chunk_sizes) == 0: - yield field + "/0" - return - inds = itertools.product(*(range(i) for i in chunk_sizes)) - for ind in inds: - yield field + "/" + ".".join([str(c) for c in ind]) - - -class ReferenceFileSystem(AsyncFileSystem): - """View byte ranges of some other file as a file system - Initial version: single file system target, which must support - async, and must allow start and end args in _cat_file. Later versions - may allow multiple arbitrary URLs for the targets. - This FileSystem is read-only. It is designed to be used with async - targets (for now). This FileSystem only allows whole-file access, no - ``open``. We do not get original file details from the target FS. - Configuration is by passing a dict of references at init, or a URL to - a JSON file containing the same; this dict - can also contain concrete data for some set of paths. - Reference dict format: - {path0: bytes_data, path1: (target_url, offset, size)} - https://github.com/fsspec/kerchunk/blob/main/README.md - """ - - protocol = "reference" - - def __init__( - self, - fo, - target=None, - ref_storage_args=None, - target_protocol=None, - target_options=None, - remote_protocol=None, - remote_options=None, - fs=None, - template_overrides=None, - simple_templates=True, - max_gap=64_000, - max_block=256_000_000, - cache_size=128, - **kwargs, - ): - """ - Parameters - ---------- - fo : dict or str - The set of references to use for this instance, with a structure as above. - If str referencing a JSON file, will use fsspec.open, in conjunction - with target_options and target_protocol to open and parse JSON at this - location. If a directory, then assume references are a set of parquet - files to be loaded lazily. - target : str - For any references having target_url as None, this is the default file - target to use - ref_storage_args : dict - If references is a str, use these kwargs for loading the JSON file. - Deprecated: use target_options instead. - target_protocol : str - Used for loading the reference file, if it is a path. If None, protocol - will be derived from the given path - target_options : dict - Extra FS options for loading the reference file ``fo``, if given as a path - remote_protocol : str - The protocol of the filesystem on which the references will be evaluated - (unless fs is provided). If not given, will be derived from the first - URL that has a protocol in the templates or in the references, in that - order. - remote_options : dict - kwargs to go with remote_protocol - fs : AbstractFileSystem | dict(str, (AbstractFileSystem | dict)) - Directly provide a file system(s): - - a single filesystem instance - - a dict of protocol:filesystem, where each value is either a filesystem - instance, or a dict of kwargs that can be used to create in - instance for the given protocol - - If this is given, remote_options and remote_protocol are ignored. - template_overrides : dict - Swap out any templates in the references file with these - useful for - testing. 
- simple_templates: bool - Whether templates can be processed with simple replace (True) or if - jinja is needed (False, much slower). All reference sets produced by - ``kerchunk`` are simple in this sense, but the spec allows for complex. - max_gap, max_block: int - For merging multiple concurrent requests to the same remote file. - Neighboring byte ranges will only be merged when their - inter-range gap is <= ``max_gap``. Default is 64KB. Set to 0 - to only merge when it requires no extra bytes. Pass a negative - number to disable merging, appropriate for local target files. - Neighboring byte ranges will only be merged when the size of - the aggregated range is <= ``max_block``. Default is 256MB. - cache_size : int - Maximum size of LRU cache, where cache_size*record_size denotes - the total number of references that can be loaded in memory at once. - Only used for lazily loaded references. - kwargs : passed to parent class - """ - super().__init__(**kwargs) - self.target = target - self.template_overrides = template_overrides - self.simple_templates = simple_templates - self.templates = {} - self.fss = {} - self._dircache = {} - self.max_gap = max_gap - self.max_block = max_block - if isinstance(fo, str): - dic = dict( - **(ref_storage_args or target_options or {}), protocol=target_protocol - ) - ref_fs, fo2 = fsspec.core.url_to_fs(fo, **dic) - if ref_fs.isfile(fo): - # text JSON - with fsspec.open(fo, "rb", **dic) as f: - logger.info("Read reference from URL %s", fo) - text = json.load(f) - self._process_references(text, template_overrides) - else: - # Lazy parquet refs - logger.info("Open lazy reference dict from URL %s", fo) - self.references = LazyReferenceMapper( - fo2, - fs=ref_fs, - cache_size=cache_size, - ) - else: - # dictionaries - self._process_references(fo, template_overrides) - if isinstance(fs, dict): - self.fss = { - k: ( - fsspec.filesystem(k.split(":", 1)[0], **opts) - if isinstance(opts, dict) - else opts - ) - for k, opts in fs.items() - } - if None not in self.fss: - self.fss[None] = filesystem("file") - return - if fs is not None: - # single remote FS - remote_protocol = ( - fs.protocol[0] if isinstance(fs.protocol, tuple) else fs.protocol - ) - self.fss[remote_protocol] = fs - - if remote_protocol is None: - # get single protocol from any templates - for ref in self.templates.values(): - if callable(ref): - ref = ref() - protocol, _ = fsspec.core.split_protocol(ref) - if protocol and protocol not in self.fss: - fs = filesystem(protocol, **(remote_options or {})) - self.fss[protocol] = fs - if remote_protocol is None: - # get single protocol from references - for ref in self.references.values(): - if callable(ref): - ref = ref() - if isinstance(ref, list) and ref[0]: - protocol, _ = fsspec.core.split_protocol(ref[0]) - if protocol not in self.fss: - fs = filesystem(protocol, **(remote_options or {})) - self.fss[protocol] = fs - # only use first remote URL - break - - if remote_protocol and remote_protocol not in self.fss: - fs = filesystem(remote_protocol, **(remote_options or {})) - self.fss[remote_protocol] = fs - - self.fss[None] = fs or filesystem("file") # default one - - def _cat_common(self, path, start=None, end=None): - path = self._strip_protocol(path) - logger.debug(f"cat: {path}") - try: - part = self.references[path] - except KeyError: - raise FileNotFoundError(path) - if isinstance(part, str): - part = part.encode() - if isinstance(part, bytes): - logger.debug(f"Reference: {path}, type bytes") - if part.startswith(b"base64:"): - part = 
base64.b64decode(part[7:]) - return part, None, None - - if len(part) == 1: - logger.debug(f"Reference: {path}, whole file => {part}") - url = part[0] - start1, end1 = start, end - else: - url, start0, size = part - logger.debug(f"Reference: {path} => {url}, offset {start0}, size {size}") - end0 = start0 + size - - if start is not None: - if start >= 0: - start1 = start0 + start - else: - start1 = end0 + start - else: - start1 = start0 - if end is not None: - if end >= 0: - end1 = start0 + end - else: - end1 = end0 + end - else: - end1 = end0 - if url is None: - url = self.target - return url, start1, end1 - - async def _cat_file(self, path, start=None, end=None, **kwargs): - part_or_url, start0, end0 = self._cat_common(path, start=start, end=end) - if isinstance(part_or_url, bytes): - return part_or_url[start:end] - protocol, _ = split_protocol(part_or_url) - try: - await self.fss[protocol]._cat_file(part_or_url, start=start, end=end) - except Exception as e: - raise ReferenceNotReachable(path, part_or_url) from e - - def cat_file(self, path, start=None, end=None, **kwargs): - part_or_url, start0, end0 = self._cat_common(path, start=start, end=end) - if isinstance(part_or_url, bytes): - return part_or_url[start:end] - protocol, _ = split_protocol(part_or_url) - try: - return self.fss[protocol].cat_file(part_or_url, start=start0, end=end0) - except Exception as e: - raise ReferenceNotReachable(path, part_or_url) from e - - def pipe_file(self, path, value, **_): - """Temporarily add binary data or reference as a file""" - self.references[path] = value - - async def _get_file(self, rpath, lpath, **kwargs): - if self.isdir(rpath): - return os.makedirs(lpath, exist_ok=True) - data = await self._cat_file(rpath) - with open(lpath, "wb") as f: - f.write(data) - - def get_file(self, rpath, lpath, callback=_DEFAULT_CALLBACK, **kwargs): - if self.isdir(rpath): - return os.makedirs(lpath, exist_ok=True) - data = self.cat_file(rpath, **kwargs) - callback.set_size(len(data)) - if isfilelike(lpath): - lpath.write(data) - else: - with open(lpath, "wb") as f: - f.write(data) - callback.absolute_update(len(data)) - - def get(self, rpath, lpath, recursive=False, **kwargs): - if recursive: - # trigger directory build - self.ls("") - rpath = self.expand_path(rpath, recursive=recursive) - fs = fsspec.filesystem("file", auto_mkdir=True) - targets = other_paths(rpath, lpath) - if recursive: - data = self.cat([r for r in rpath if not self.isdir(r)]) - else: - data = self.cat(rpath) - for remote, local in zip(rpath, targets): - if remote in data: - fs.pipe_file(local, data[remote]) - - def cat(self, path, recursive=False, on_error="raise", **kwargs): - if isinstance(path, str) and recursive: - raise NotImplementedError - if isinstance(path, list) and (recursive or any("*" in p for p in path)): - raise NotImplementedError - proto_dict = _protocol_groups(path, self.references) - out = {} - for proto, paths in proto_dict.items(): - fs = self.fss[proto] - urls, starts, ends = [], [], [] - for p in paths: - # find references or label not-found. 
Early exit if any not - # found and on_error is "raise" - try: - u, s, e = self._cat_common(p) - urls.append(u) - starts.append(s) - ends.append(e) - except FileNotFoundError as err: - if on_error == "raise": - raise - if on_error != "omit": - out[p] = err - - # process references into form for merging - urls2 = [] - starts2 = [] - ends2 = [] - paths2 = [] - whole_files = set() - for u, s, e, p in zip(urls, starts, ends, paths): - if isinstance(u, bytes): - # data - out[p] = u - elif s is None: - # whole file - limits are None, None, but no further - # entries take for this file - whole_files.add(u) - urls2.append(u) - starts2.append(s) - ends2.append(e) - paths2.append(p) - for u, s, e, p in zip(urls, starts, ends, paths): - # second run to account for files that are to be loaded whole - if s is not None and u not in whole_files: - urls2.append(u) - starts2.append(s) - ends2.append(e) - paths2.append(p) - - # merge and fetch consolidated ranges - new_paths, new_starts, new_ends = merge_offset_ranges( - list(urls2), - list(starts2), - list(ends2), - sort=True, - max_gap=self.max_gap, - max_block=self.max_block, - ) - bytes_out = fs.cat_ranges(new_paths, new_starts, new_ends) - - # unbundle from merged bytes - simple approach - for u, s, e, p in zip(urls, starts, ends, paths): - if p in out: - continue # was bytes, already handled - for np, ns, ne, b in zip(new_paths, new_starts, new_ends, bytes_out): - if np == u and (ns is None or ne is None): - if isinstance(b, Exception): - out[p] = b - else: - out[p] = b[s:e] - elif np == u and s >= ns and e <= ne: - if isinstance(b, Exception): - out[p] = b - else: - out[p] = b[s - ns : (e - ne) or None] - - for k, v in out.copy().items(): - # these were valid references, but fetch failed, so transform exc - if isinstance(v, Exception) and k in self.references: - ex = out[k] - new_ex = ReferenceNotReachable(k, self.references[k]) - new_ex.__cause__ = ex - if on_error == "raise": - raise new_ex - elif on_error != "omit": - out[k] = new_ex - - if len(out) == 1 and isinstance(path, str) and "*" not in path: - return _first(out) - return out - - def _process_references(self, references, template_overrides=None): - vers = references.get("version", None) - if vers is None: - self._process_references0(references) - elif vers == 1: - self._process_references1(references, template_overrides=template_overrides) - else: - raise ValueError(f"Unknown reference spec version: {vers}") - # TODO: we make dircache by iterating over all entries, but for Spec >= 1, - # can replace with programmatic. Is it even needed for mapper interface? 
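A minimal, hedged usage sketch of the version dispatch above: a spec version 1 reference set with one template, opened through fsspec's "reference" protocol. The URLs are placeholders and s3fs is assumed installed for the remote entries:

import fsspec

refs = {
    "version": 1,
    "templates": {"u": "s3://bucket/path"},
    "refs": {
        "readme": "base64:aGVsbG8=",            # inline data, decoded when read
        "data/0": ["{{u}}/chunk.bin", 0, 100],  # template expanded by _process_references1
    },
}
fs = fsspec.filesystem("reference", fo=refs, remote_protocol="s3",
                       remote_options={"anon": True})
print(fs.cat_file("readme"))  # b'hello'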
- - def _process_references0(self, references): - """Make reference dict for Spec Version 0""" - self.references = references - - def _process_references1(self, references, template_overrides=None): - if not self.simple_templates or self.templates: - import jinja2 - self.references = {} - self._process_templates(references.get("templates", {})) - - @lru_cache(1000) - def _render_jinja(u): - return jinja2.Template(u).render(**self.templates) - - for k, v in references.get("refs", {}).items(): - if isinstance(v, str): - if v.startswith("base64:"): - self.references[k] = base64.b64decode(v[7:]) - self.references[k] = v - elif self.templates: - u = v[0] - if "{{" in u: - if self.simple_templates: - u = ( - u.replace("{{", "{") - .replace("}}", "}") - .format(**self.templates) - ) - else: - u = _render_jinja(u) - self.references[k] = [u] if len(v) == 1 else [u, v[1], v[2]] - else: - self.references[k] = v - self.references.update(self._process_gen(references.get("gen", []))) - - def _process_templates(self, tmp): - self.templates = {} - if self.template_overrides is not None: - tmp.update(self.template_overrides) - for k, v in tmp.items(): - if "{{" in v: - import jinja2 - - self.templates[k] = lambda temp=v, **kwargs: jinja2.Template( - temp - ).render(**kwargs) - else: - self.templates[k] = v - - def _process_gen(self, gens): - out = {} - for gen in gens: - dimension = { - k: v - if isinstance(v, list) - else range(v.get("start", 0), v["stop"], v.get("step", 1)) - for k, v in gen["dimensions"].items() - } - products = ( - dict(zip(dimension.keys(), values)) - for values in itertools.product(*dimension.values()) - ) - for pr in products: - import jinja2 - - key = jinja2.Template(gen["key"]).render(**pr, **self.templates) - url = jinja2.Template(gen["url"]).render(**pr, **self.templates) - if ("offset" in gen) and ("length" in gen): - offset = int( - jinja2.Template(gen["offset"]).render(**pr, **self.templates) - ) - length = int( - jinja2.Template(gen["length"]).render(**pr, **self.templates) - ) - out[key] = [url, offset, length] - elif ("offset" in gen) ^ ("length" in gen): - raise ValueError( - "Both 'offset' and 'length' are required for a " - "reference generator entry if either is provided." 
- ) - else: - out[key] = [url] - return out - - def _dircache_from_items(self): - self.dircache = {"": []} - it = self.references.items() - for path, part in it: - if isinstance(part, (bytes, str)): - size = len(part) - elif len(part) == 1: - size = None - else: - _, start, size = part - par = path.rsplit("/", 1)[0] if "/" in path else "" - par0 = par - while par0 and par0 not in self.dircache: - # build parent directories - self.dircache[par0] = [] - self.dircache.setdefault( - par0.rsplit("/", 1)[0] if "/" in par0 else "", [] - ).append({"name": par0, "type": "directory", "size": 0}) - par0 = self._parent(par0) - - self.dircache[par].append({"name": path, "type": "file", "size": size}) - - def _open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs): - data = self.cat_file(path) # load whole chunk into memory - return io.BytesIO(data) - - def ls(self, path, detail=True, **kwargs): - path = self._strip_protocol(path) - if isinstance(self.references, LazyReferenceMapper): - try: - return self.references.ls(path, detail) - except KeyError: - pass - raise FileNotFoundError(f"'{path}' is not a known key") - if not self.dircache: - self._dircache_from_items() - out = self._ls_from_cache(path) - if out is None: - raise FileNotFoundError(path) - if detail: - return out - return [o["name"] for o in out] - - def exists(self, path, **kwargs): # overwrite auto-sync version - return self.isdir(path) or self.isfile(path) - - def isdir(self, path): # overwrite auto-sync version - if self.dircache: - return path in self.dircache - elif isinstance(self.references, LazyReferenceMapper): - return path in self.references.listdir("") - else: - # this may be faster than building dircache for single calls, but - # by looping will be slow for many calls; could cache it? 
- return any(_.startswith(f"{path}/") for _ in self.references) - - def isfile(self, path): # overwrite auto-sync version - return path in self.references - - async def _ls(self, path, detail=True, **kwargs): # calls fast sync code - return self.ls(path, detail, **kwargs) - - def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs): - if withdirs: - return super().find( - path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs - ) - if path: - path = self._strip_protocol(path) - r = sorted(k for k in self.references if k.startswith(path)) - else: - r = sorted(self.references) - if detail: - if not self.dircache: - self._dircache_from_items() - return {k: self._ls_from_cache(k)[0] for k in r} - else: - return r - - def info(self, path, **kwargs): - out = self.references.get(path) - if out is not None: - if isinstance(out, (str, bytes)): - # decode base64 here - return {"name": path, "type": "file", "size": len(out)} - elif len(out) > 1: - return {"name": path, "type": "file", "size": out[2]} - else: - out0 = [{"name": path, "type": "file", "size": None}] - else: - out = self.ls(path, True) - out0 = [o for o in out if o["name"] == path] - if not out0: - return {"name": path, "type": "directory", "size": 0} - if out0[0]["size"] is None: - # if this is a whole remote file, update size using remote FS - prot, _ = split_protocol(self.references[path][0]) - out0[0]["size"] = self.fss[prot].size(self.references[path][0]) - return out0[0] - - async def _info(self, path, **kwargs): # calls fast sync code - return self.info(path) - - async def _rm_file(self, path, **kwargs): - self.references.pop( - path, None - ) # ignores FileNotFound, just as well for directories - self.dircache.clear() # this is a bit heavy handed - - async def _pipe_file(self, path, data): - # can be str or bytes - self.references[path] = data - self.dircache.clear() # this is a bit heavy handed - - async def _put_file(self, lpath, rpath): - # puts binary - with open(lpath, "rb") as f: - self.references[rpath] = f.read() - self.dircache.clear() # this is a bit heavy handed - - def save_json(self, url, **storage_options): - """Write modified references into new location""" - out = {} - for k, v in self.references.items(): - if isinstance(v, bytes): - try: - out[k] = v.decode("ascii") - except UnicodeDecodeError: - out[k] = (b"base64:" + base64.b64encode(v)).decode() - else: - out[k] = v - with fsspec.open(url, "wb", **storage_options) as f: - f.write(json.dumps({"version": 1, "refs": out}).encode()) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/dataframe.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/dataframe.py deleted file mode 100644 index 8c8c78a3d9c653d4fb5c5c50a0de8eb2ffbccab6..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/dataframe.py +++ /dev/null @@ -1,304 +0,0 @@ -"""gr.Dataframe() component""" - -from __future__ import annotations - -from typing import TYPE_CHECKING, Any, Callable, Literal - -import numpy as np -import pandas as pd -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable - -from gradio import utils -from gradio.components.base import IOComponent, _Keywords -from gradio.events import ( - Changeable, - EventListenerMethod, - Inputable, - Selectable, -) - -if TYPE_CHECKING: - from typing import 
TypedDict - - class DataframeData(TypedDict): - headers: list[str] - data: list[list[str | int | bool]] - - -set_documentation_group("component") - - -@document() -class Dataframe(Changeable, Inputable, Selectable, IOComponent, JSONSerializable): - """ - Accepts or displays 2D input through a spreadsheet-like component for dataframes. - Preprocessing: passes the uploaded spreadsheet data as a {pandas.DataFrame}, {numpy.array}, {List[List]}, or {List} depending on `type` - Postprocessing: expects a {pandas.DataFrame}, {numpy.array}, {List[List]}, {List}, a {Dict} with keys `data` (and optionally `headers`), or {str} path to a csv, which is rendered in the spreadsheet. - Examples-format: a {str} filepath to a csv with data, a pandas dataframe, or a list of lists (excluding headers) where each sublist is a row of data. - Demos: filter_records, matrix_transpose, tax_calculator - """ - - markdown_parser = None - - def __init__( - self, - value: list[list[Any]] | Callable | None = None, - *, - headers: list[str] | None = None, - row_count: int | tuple[int, str] = (1, "dynamic"), - col_count: int | tuple[int, str] | None = None, - datatype: str | list[str] = "str", - type: Literal["pandas", "numpy", "array"] = "pandas", - max_rows: int | None = 20, - max_cols: int | None = None, - overflow_row_behaviour: Literal["paginate", "show_ends"] = "paginate", - label: str | None = None, - every: float | None = None, - show_label: bool | None = None, - scale: int | None = None, - min_width: int = 160, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - wrap: bool = False, - **kwargs, - ): - """ - Parameters: - value: Default value as a 2-dimensional list of values. If callable, the function will be called whenever the app loads to set the initial value of the component. - headers: List of str header names. If None, no headers are shown. - row_count: Limit number of rows for input and decide whether user can create new rows. The first element of the tuple is an `int`, the row count; the second should be 'fixed' or 'dynamic', the new row behaviour. If an `int` is passed the rows default to 'dynamic' - col_count: Limit number of columns for input and decide whether user can create new columns. The first element of the tuple is an `int`, the number of columns; the second should be 'fixed' or 'dynamic', the new column behaviour. If an `int` is passed the columns default to 'dynamic' - datatype: Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are "str", "number", "bool", "date", and "markdown". - type: Type of value to be returned by component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for a Python array. - label: component name in interface. - max_rows: Maximum number of rows to display at once. Set to None for infinite. - max_cols: Maximum number of columns to display at once. Set to None for infinite. - overflow_row_behaviour: If set to "paginate", will create pages for overflow rows. If set to "show_ends", will show initial and final rows and truncate middle rows. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. 
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - interactive: if True, will allow users to edit the dataframe; if False, can only be used to display data. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - wrap: if True text in table cells will wrap when appropriate, if False the table will scroll horizontally. Defaults to False. - """ - - self.wrap = wrap - self.row_count = self.__process_counts(row_count) - self.col_count = self.__process_counts( - col_count, len(headers) if headers else 3 - ) - - self.__validate_headers(headers, self.col_count[0]) - - self.headers = ( - headers if headers is not None else list(range(1, self.col_count[0] + 1)) - ) - self.datatype = ( - datatype if isinstance(datatype, list) else [datatype] * self.col_count[0] - ) - valid_types = ["pandas", "numpy", "array"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - self.type = type - values = { - "str": "", - "number": 0, - "bool": False, - "date": "01/01/1970", - "markdown": "", - "html": "", - } - column_dtypes = ( - [datatype] * self.col_count[0] if isinstance(datatype, str) else datatype - ) - self.empty_input = [ - [values[c] for c in column_dtypes] for _ in range(self.row_count[0]) - ] - - self.max_rows = max_rows - self.max_cols = max_cols - self.overflow_row_behaviour = overflow_row_behaviour - self.select: EventListenerMethod - """ - Event listener for when the user selects cell within Dataframe. - Uses event data gradio.SelectData to carry `value` referring to value of selected cell, and `index` tuple to refer to index row and column. - See EventData documentation on how to use this event data. 
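A hedged usage sketch for the select event documented above (assumes a gradio 3.x environment providing gr.SelectData; not part of the original file):

import gradio as gr

def on_select(evt: gr.SelectData):
    # evt.index is a (row, column) tuple, evt.value the selected cell's value
    return f"cell {evt.index} holds {evt.value!r}"

with gr.Blocks() as demo:
    df = gr.Dataframe(value=[[1, 2], [3, 4]], headers=["a", "b"])
    out = gr.Textbox()
    df.select(on_select, None, out)

demo.launch()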
- """ - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - scale=scale, - min_width=min_width, - interactive=interactive, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "headers": self.headers, - "datatype": self.datatype, - "row_count": self.row_count, - "col_count": self.col_count, - "value": self.value, - "max_rows": self.max_rows, - "max_cols": self.max_cols, - "overflow_row_behaviour": self.overflow_row_behaviour, - "wrap": self.wrap, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - max_rows: int | None = None, - max_cols: str | None = None, - label: str | None = None, - show_label: bool | None = None, - scale: int | None = None, - min_width: int | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - return { - "max_rows": max_rows, - "max_cols": max_cols, - "label": label, - "show_label": show_label, - "scale": scale, - "min_width": min_width, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - - def preprocess(self, x: DataframeData): - """ - Parameters: - x: 2D array of str, numeric, or bool data - Returns: - Dataframe in requested format - """ - if self.type == "pandas": - if x.get("headers") is not None: - return pd.DataFrame(x["data"], columns=x.get("headers")) - else: - return pd.DataFrame(x["data"]) - if self.type == "numpy": - return np.array(x["data"]) - elif self.type == "array": - return x["data"] - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'pandas', 'numpy', 'array'." - ) - - def postprocess( - self, y: str | pd.DataFrame | np.ndarray | list[list[str | float]] | dict - ) -> dict: - """ - Parameters: - y: dataframe in given format - Returns: - JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data - """ - if y is None: - return self.postprocess(self.empty_input) - if isinstance(y, dict): - return y - if isinstance(y, str): - dataframe = pd.read_csv(y) - return { - "headers": list(dataframe.columns), - "data": Dataframe.__process_markdown( - dataframe.to_dict(orient="split")["data"], self.datatype - ), - } - if isinstance(y, pd.DataFrame): - return { - "headers": list(y.columns), # type: ignore - "data": Dataframe.__process_markdown( - y.to_dict(orient="split")["data"], self.datatype # type: ignore - ), - } - if isinstance(y, (np.ndarray, list)): - if len(y) == 0: - return self.postprocess([[]]) - if isinstance(y, np.ndarray): - y = y.tolist() - assert isinstance(y, list), "output cannot be converted to list" - - _headers = self.headers - - if len(self.headers) < len(y[0]): - _headers = [ - *self.headers, - *list(range(len(self.headers) + 1, len(y[0]) + 1)), - ] - elif len(self.headers) > len(y[0]): - _headers = self.headers[: len(y[0])] - - return { - "headers": _headers, - "data": Dataframe.__process_markdown(y, self.datatype), - } - raise ValueError("Cannot process value as a Dataframe") - - @staticmethod - def __process_counts(count, default=3) -> tuple[int, str]: - if count is None: - return (default, "dynamic") - if type(count) == int or type(count) == float: - return (int(count), "dynamic") - else: - return count - - @staticmethod - def __validate_headers(headers: list[str] | None, col_count: int): - if headers is not None and len(headers) != col_count: - raise ValueError( - 
f"The length of the headers list must be equal to the col_count int.\n" - f"The column count is set to {col_count} but `headers` has {len(headers)} items. " - f"Check the values passed to `col_count` and `headers`." - ) - - @classmethod - def __process_markdown(cls, data: list[list[Any]], datatype: list[str]): - if "markdown" not in datatype: - return data - - if cls.markdown_parser is None: - cls.markdown_parser = utils.get_markdown_parser() - - for i in range(len(data)): - for j in range(len(data[i])): - if datatype[j] == "markdown": - data[i][j] = cls.markdown_parser.render(data[i][j]) - - return data - - def as_example(self, input_data: pd.DataFrame | np.ndarray | str | None): - if input_data is None: - return "" - elif isinstance(input_data, pd.DataFrame): - return input_data.head(n=5).to_dict(orient="split")["data"] # type: ignore - elif isinstance(input_data, np.ndarray): - return input_data.tolist() - return input_data diff --git a/spaces/deelerb/3dselfie/PIFu/apps/eval.py b/spaces/deelerb/3dselfie/PIFu/apps/eval.py deleted file mode 100644 index a0ee3fa66c75a144da5c155b927f63170b7e923c..0000000000000000000000000000000000000000 --- a/spaces/deelerb/3dselfie/PIFu/apps/eval.py +++ /dev/null @@ -1,153 +0,0 @@ -import tqdm -import glob -import torchvision.transforms as transforms -from PIL import Image -from lib.model import * -from lib.train_util import * -from lib.sample_util import * -from lib.mesh_util import * -# from lib.options import BaseOptions -from torch.utils.data import DataLoader -import torch -import numpy as np -import json -import time -import sys -import os - -sys.path.insert(0, os.path.abspath( - os.path.join(os.path.dirname(__file__), '..'))) -ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - - -# # get options -# opt = BaseOptions().parse() - -class Evaluator: - def __init__(self, opt, projection_mode='orthogonal'): - self.opt = opt - self.load_size = self.opt.loadSize - self.to_tensor = transforms.Compose([ - transforms.Resize(self.load_size), - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) - ]) - # set cuda - cuda = torch.device( - 'cuda:%d' % opt.gpu_id) if torch.cuda.is_available() else torch.device('cpu') - - # create net - netG = HGPIFuNet(opt, projection_mode).to(device=cuda) - print('Using Network: ', netG.name) - - if opt.load_netG_checkpoint_path: - netG.load_state_dict(torch.load( - opt.load_netG_checkpoint_path, map_location=cuda)) - - if opt.load_netC_checkpoint_path is not None: - print('loading for net C ...', opt.load_netC_checkpoint_path) - netC = ResBlkPIFuNet(opt).to(device=cuda) - netC.load_state_dict(torch.load( - opt.load_netC_checkpoint_path, map_location=cuda)) - else: - netC = None - - os.makedirs(opt.results_path, exist_ok=True) - os.makedirs('%s/%s' % (opt.results_path, opt.name), exist_ok=True) - - opt_log = os.path.join(opt.results_path, opt.name, 'opt.txt') - with open(opt_log, 'w') as outfile: - outfile.write(json.dumps(vars(opt), indent=2)) - - self.cuda = cuda - self.netG = netG - self.netC = netC - - def load_image(self, image_path, mask_path): - # Name - img_name = os.path.splitext(os.path.basename(image_path))[0] - # Calib - B_MIN = np.array([-1, -1, -1]) - B_MAX = np.array([1, 1, 1]) - projection_matrix = np.identity(4) - projection_matrix[1, 1] = -1 - calib = torch.Tensor(projection_matrix).float() - # Mask - mask = Image.open(mask_path).convert('L') - mask = transforms.Resize(self.load_size)(mask) - mask = transforms.ToTensor()(mask).float() - # image - image = 
Image.open(image_path).convert('RGB') - image = self.to_tensor(image) - image = mask.expand_as(image) * image - return { - 'name': img_name, - 'img': image.unsqueeze(0), - 'calib': calib.unsqueeze(0), - 'mask': mask.unsqueeze(0), - 'b_min': B_MIN, - 'b_max': B_MAX, - } - - def load_image_from_memory(self, image_path, mask_path, img_name): - # Calib - B_MIN = np.array([-1, -1, -1]) - B_MAX = np.array([1, 1, 1]) - projection_matrix = np.identity(4) - projection_matrix[1, 1] = -1 - calib = torch.Tensor(projection_matrix).float() - # Mask - mask = Image.fromarray(mask_path).convert('L') - mask = transforms.Resize(self.load_size)(mask) - mask = transforms.ToTensor()(mask).float() - # image - image = Image.fromarray(image_path).convert('RGB') - image = self.to_tensor(image) - image = mask.expand_as(image) * image - return { - 'name': img_name, - 'img': image.unsqueeze(0), - 'calib': calib.unsqueeze(0), - 'mask': mask.unsqueeze(0), - 'b_min': B_MIN, - 'b_max': B_MAX, - } - - def eval(self, data, use_octree=False): - ''' - Evaluate a data point - :param data: a dict containing at least ['name'], ['image'], ['calib'], ['b_min'] and ['b_max'] tensors. - :return: - ''' - opt = self.opt - with torch.no_grad(): - self.netG.eval() - if self.netC: - self.netC.eval() - save_path = '%s/%s/result_%s.obj' % ( - opt.results_path, opt.name, data['name']) - if self.netC: - gen_mesh_color(opt, self.netG, self.netC, self.cuda, - data, save_path, use_octree=use_octree) - else: - gen_mesh(opt, self.netG, self.cuda, data, - save_path, use_octree=use_octree) - - -if __name__ == '__main__': - evaluator = Evaluator(opt) - - test_images = glob.glob(os.path.join(opt.test_folder_path, '*')) - test_images = [f for f in test_images if ( - 'png' in f or 'jpg' in f) and (not 'mask' in f)] - test_masks = [f[:-4]+'_mask.png' for f in test_images] - - print("num; ", len(test_masks)) - - for image_path, mask_path in tqdm.tqdm(zip(test_images, test_masks)): - try: - print(image_path, mask_path) - data = evaluator.load_image(image_path, mask_path) - evaluator.eval(data, True) - except Exception as e: - print("error:", e.args) diff --git a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/open_clip/tokenizer.py b/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/open_clip/tokenizer.py deleted file mode 100644 index ee4d28450ec5dd12a79daf38cf3088e9e73c2cd5..0000000000000000000000000000000000000000 --- a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/open_clip/tokenizer.py +++ /dev/null @@ -1,197 +0,0 @@ -""" CLIP tokenizer - -Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. -""" -import gzip -import html -import os -from functools import lru_cache -from typing import Union, List - -import ftfy -import regex as re -import torch - - -@lru_cache() -def default_bpe(): - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz" - ) - - -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a corresponding list of unicode strings. - The reversible bpe codes work on unicode strings. - This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. - When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. - This is a signficant percentage of your normal, say, 32K bpe vocab. - To avoid that, we want lookup tables between utf-8 bytes and unicode strings. 
-    And avoids mapping to whitespace/control characters the bpe code barfs on.
-    """
-    bs = (
-        list(range(ord("!"), ord("~") + 1))
-        + list(range(ord("¡"), ord("¬") + 1))
-        + list(range(ord("®"), ord("ÿ") + 1))
-    )
-    cs = bs[:]
-    n = 0
-    for b in range(2**8):
-        if b not in bs:
-            bs.append(b)
-            cs.append(2**8 + n)
-            n += 1
-    cs = [chr(n) for n in cs]
-    return dict(zip(bs, cs))
-
-
-def get_pairs(word):
-    """Return set of symbol pairs in a word.
-    Word is represented as tuple of symbols (symbols being variable-length strings).
-    """
-    pairs = set()
-    prev_char = word[0]
-    for char in word[1:]:
-        pairs.add((prev_char, char))
-        prev_char = char
-    return pairs
-
-
-def basic_clean(text):
-    text = ftfy.fix_text(text)
-    text = html.unescape(html.unescape(text))
-    return text.strip()
-
-
-def whitespace_clean(text):
-    text = re.sub(r"\s+", " ", text)
-    text = text.strip()
-    return text
-
-
-class SimpleTokenizer(object):
-    def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
-        self.byte_encoder = bytes_to_unicode()
-        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
-        merges = gzip.open(bpe_path).read().decode("utf-8").split("\n")
-        merges = merges[1 : 49152 - 256 - 2 + 1]
-        merges = [tuple(merge.split()) for merge in merges]
-        vocab = list(bytes_to_unicode().values())
-        vocab = vocab + [v + "</w>" for v in vocab]
-        for merge in merges:
-            vocab.append("".join(merge))
-        if not special_tokens:
-            special_tokens = ["<start_of_text>", "<end_of_text>"]
-        else:
-            special_tokens = ["<start_of_text>", "<end_of_text>"] + special_tokens
-        vocab.extend(special_tokens)
-        self.encoder = dict(zip(vocab, range(len(vocab))))
-        self.decoder = {v: k for k, v in self.encoder.items()}
-        self.bpe_ranks = dict(zip(merges, range(len(merges))))
-        self.cache = {t: t for t in special_tokens}
-        special = "|".join(special_tokens)
-        self.pat = re.compile(
-            special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
-            re.IGNORECASE,
-        )
-
-        self.vocab_size = len(self.encoder)
-        self.all_special_ids = [self.encoder[t] for t in special_tokens]
-
-    def bpe(self, token):
-        if token in self.cache:
-            return self.cache[token]
-        word = tuple(token[:-1]) + (token[-1] + "</w>",)
-        pairs = get_pairs(word)
-
-        if not pairs:
-            return token + "</w>"
-
-        while True:
-            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
-            if bigram not in self.bpe_ranks:
-                break
-            first, second = bigram
-            new_word = []
-            i = 0
-            while i < len(word):
-                try:
-                    j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except ValueError:
-                    new_word.extend(word[i:])
-                    break
-
-                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
-                    new_word.append(first + second)
-                    i += 2
-                else:
-                    new_word.append(word[i])
-                    i += 1
-            new_word = tuple(new_word)
-            word = new_word
-            if len(word) == 1:
-                break
-            else:
-                pairs = get_pairs(word)
-        word = " ".join(word)
-        self.cache[token] = word
-        return word
-
-    def encode(self, text):
-        bpe_tokens = []
-        text = whitespace_clean(basic_clean(text)).lower()
-        for token in re.findall(self.pat, text):
-            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
-            bpe_tokens.extend(
-                self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
-            )
-        return bpe_tokens
-
-    def decode(self, tokens):
-        text = "".join([self.decoder[token] for token in tokens])
-        text = (
-            bytearray([self.byte_decoder[c] for c in text])
-            .decode("utf-8", errors="replace")
-            .replace("</w>", " ")
-        )
-        return text
-
-
-_tokenizer = SimpleTokenizer()
-
-
-def tokenize(
-    texts: Union[str, List[str]], context_length: int = 77
-) -> torch.LongTensor:
-    """
-    Returns the tokenized representation of given input string(s)
-
-    Parameters
-    ----------
-    texts : Union[str, List[str]]
-        An input string or a list of input strings to tokenize
-    context_length : int
-        The context length to use; all CLIP models use 77 as the context length
-
-    Returns
-    -------
-    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
-    """
-    if isinstance(texts, str):
-        texts = [texts]
-
-    sot_token = _tokenizer.encoder["<start_of_text>"]
-    eot_token = _tokenizer.encoder["<end_of_text>"]
-    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
-    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
-
-    for i, tokens in enumerate(all_tokens):
-        if len(tokens) > context_length:
-            tokens = tokens[:context_length]  # Truncate
-        result[i, : len(tokens)] = torch.tensor(tokens)
-
-    return result
diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/bfm.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/bfm.py deleted file mode 100644 index 74ea0be57ef63a1732940e9088184a1da0b4e0d0..0000000000000000000000000000000000000000 --- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/bfm.py +++ /dev/null @@ -1,331 +0,0 @@
-"""This script defines the parametric 3d face model for Deep3DFaceRecon_pytorch
-"""
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from scipy.io import loadmat
-from sad_talker.src.face3d.util.load_mats import transferBFM09
-import os
-
-def perspective_projection(focal, center):
-    # return p.T (N, 3) @ (3, 3)
-    return np.array([
-        focal, 0, center,
-        0, focal, center,
-        0, 0, 1
-    ]).reshape([3, 3]).astype(np.float32).transpose()
-
-class SH:
-    def __init__(self):
-        self.a = [np.pi, 2 * np.pi / np.sqrt(3.), 2 * np.pi / np.sqrt(8.)]
-        self.c = [1/np.sqrt(4 * np.pi), np.sqrt(3.) / np.sqrt(4 * np.pi), 3 * np.sqrt(5.) / np.sqrt(12 * np.pi)]
-
-
-
-class ParametricFaceModel:
-    def __init__(self,
-                bfm_folder='./BFM',
-                recenter=True,
-                camera_distance=10.,
-                init_lit=np.array([
-                    0.8, 0, 0, 0, 0, 0, 0, 0, 0
-                    ]),
-                focal=1015.,
-                center=112.,
-                is_train=True,
-                default_name='BFM_model_front.mat'):
-
-        if not os.path.isfile(os.path.join(bfm_folder, default_name)):
-            transferBFM09(bfm_folder)
-
-        model = loadmat(os.path.join(bfm_folder, default_name))
-        # mean face shape. [3*N,1]
-        self.mean_shape = model['meanshape'].astype(np.float32)
-        # identity basis. [3*N,80]
-        self.id_base = model['idBase'].astype(np.float32)
-        # expression basis. [3*N,64]
-        self.exp_base = model['exBase'].astype(np.float32)
-        # mean face texture. [3*N,1] (0-255)
-        self.mean_tex = model['meantex'].astype(np.float32)
-        # texture basis. [3*N,80]
-        self.tex_base = model['texBase'].astype(np.float32)
-        # face indices for each vertex that lies in. starts from 0. [N,8]
-        self.point_buf = model['point_buf'].astype(np.int64) - 1
-        # vertex indices for each face. starts from 0. [F,3]
-        self.face_buf = model['tri'].astype(np.int64) - 1
-        # vertex indices for 68 landmarks. starts from 0. [68,1]
-        self.keypoints = np.squeeze(model['keypoints']).astype(np.int64) - 1
-
-        if is_train:
-            # vertex indices for small face region to compute photometric error. starts from 0.
-            self.front_mask = np.squeeze(model['frontmask2_idx']).astype(np.int64) - 1
-            # vertex indices for each face from small face region. starts from 0.
[f,3] - self.front_face_buf = model['tri_mask2'].astype(np.int64) - 1 - # vertex indices for pre-defined skin region to compute reflectance loss - self.skin_mask = np.squeeze(model['skinmask']) - - if recenter: - mean_shape = self.mean_shape.reshape([-1, 3]) - mean_shape = mean_shape - np.mean(mean_shape, axis=0, keepdims=True) - self.mean_shape = mean_shape.reshape([-1, 1]) - - self.persc_proj = perspective_projection(focal, center) - self.device = 'cpu' - self.camera_distance = camera_distance - self.SH = SH() - self.init_lit = init_lit.reshape([1, 1, -1]).astype(np.float32) - - - def to(self, device): - self.device = device - for key, value in self.__dict__.items(): - if type(value).__module__ == np.__name__: - setattr(self, key, torch.tensor(value).to(device)) - - - def compute_shape(self, id_coeff, exp_coeff): - """ - Return: - face_shape -- torch.tensor, size (B, N, 3) - - Parameters: - id_coeff -- torch.tensor, size (B, 80), identity coeffs - exp_coeff -- torch.tensor, size (B, 64), expression coeffs - """ - batch_size = id_coeff.shape[0] - id_part = torch.einsum('ij,aj->ai', self.id_base, id_coeff) - exp_part = torch.einsum('ij,aj->ai', self.exp_base, exp_coeff) - face_shape = id_part + exp_part + self.mean_shape.reshape([1, -1]) - return face_shape.reshape([batch_size, -1, 3]) - - - def compute_texture(self, tex_coeff, normalize=True): - """ - Return: - face_texture -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.) - - Parameters: - tex_coeff -- torch.tensor, size (B, 80) - """ - batch_size = tex_coeff.shape[0] - face_texture = torch.einsum('ij,aj->ai', self.tex_base, tex_coeff) + self.mean_tex - if normalize: - face_texture = face_texture / 255. - return face_texture.reshape([batch_size, -1, 3]) - - - def compute_norm(self, face_shape): - """ - Return: - vertex_norm -- torch.tensor, size (B, N, 3) - - Parameters: - face_shape -- torch.tensor, size (B, N, 3) - """ - - v1 = face_shape[:, self.face_buf[:, 0]] - v2 = face_shape[:, self.face_buf[:, 1]] - v3 = face_shape[:, self.face_buf[:, 2]] - e1 = v1 - v2 - e2 = v2 - v3 - face_norm = torch.cross(e1, e2, dim=-1) - face_norm = F.normalize(face_norm, dim=-1, p=2) - face_norm = torch.cat([face_norm, torch.zeros(face_norm.shape[0], 1, 3).to(self.device)], dim=1) - - vertex_norm = torch.sum(face_norm[:, self.point_buf], dim=2) - vertex_norm = F.normalize(vertex_norm, dim=-1, p=2) - return vertex_norm - - - def compute_color(self, face_texture, face_norm, gamma): - """ - Return: - face_color -- torch.tensor, size (B, N, 3), range (0, 1.) - - Parameters: - face_texture -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.) - face_norm -- torch.tensor, size (B, N, 3), rotated face normal - gamma -- torch.tensor, size (B, 27), SH coeffs - """ - batch_size = gamma.shape[0] - v_num = face_texture.shape[1] - a, c = self.SH.a, self.SH.c - gamma = gamma.reshape([batch_size, 3, 9]) - gamma = gamma + self.init_lit - gamma = gamma.permute(0, 2, 1) - Y = torch.cat([ - a[0] * c[0] * torch.ones_like(face_norm[..., :1]).to(self.device), - -a[1] * c[1] * face_norm[..., 1:2], - a[1] * c[1] * face_norm[..., 2:], - -a[1] * c[1] * face_norm[..., :1], - a[2] * c[2] * face_norm[..., :1] * face_norm[..., 1:2], - -a[2] * c[2] * face_norm[..., 1:2] * face_norm[..., 2:], - 0.5 * a[2] * c[2] / np.sqrt(3.) 
* (3 * face_norm[..., 2:] ** 2 - 1), - -a[2] * c[2] * face_norm[..., :1] * face_norm[..., 2:], - 0.5 * a[2] * c[2] * (face_norm[..., :1] ** 2 - face_norm[..., 1:2] ** 2) - ], dim=-1) - r = Y @ gamma[..., :1] - g = Y @ gamma[..., 1:2] - b = Y @ gamma[..., 2:] - face_color = torch.cat([r, g, b], dim=-1) * face_texture - return face_color - - - def compute_rotation(self, angles): - """ - Return: - rot -- torch.tensor, size (B, 3, 3) pts @ trans_mat - - Parameters: - angles -- torch.tensor, size (B, 3), radian - """ - - batch_size = angles.shape[0] - ones = torch.ones([batch_size, 1]).to(self.device) - zeros = torch.zeros([batch_size, 1]).to(self.device) - x, y, z = angles[:, :1], angles[:, 1:2], angles[:, 2:], - - rot_x = torch.cat([ - ones, zeros, zeros, - zeros, torch.cos(x), -torch.sin(x), - zeros, torch.sin(x), torch.cos(x) - ], dim=1).reshape([batch_size, 3, 3]) - - rot_y = torch.cat([ - torch.cos(y), zeros, torch.sin(y), - zeros, ones, zeros, - -torch.sin(y), zeros, torch.cos(y) - ], dim=1).reshape([batch_size, 3, 3]) - - rot_z = torch.cat([ - torch.cos(z), -torch.sin(z), zeros, - torch.sin(z), torch.cos(z), zeros, - zeros, zeros, ones - ], dim=1).reshape([batch_size, 3, 3]) - - rot = rot_z @ rot_y @ rot_x - return rot.permute(0, 2, 1) - - - def to_camera(self, face_shape): - face_shape[..., -1] = self.camera_distance - face_shape[..., -1] - return face_shape - - def to_image(self, face_shape): - """ - Return: - face_proj -- torch.tensor, size (B, N, 2), y direction is opposite to v direction - - Parameters: - face_shape -- torch.tensor, size (B, N, 3) - """ - # to image_plane - face_proj = face_shape @ self.persc_proj - face_proj = face_proj[..., :2] / face_proj[..., 2:] - - return face_proj - - - def transform(self, face_shape, rot, trans): - """ - Return: - face_shape -- torch.tensor, size (B, N, 3) pts @ rot + trans - - Parameters: - face_shape -- torch.tensor, size (B, N, 3) - rot -- torch.tensor, size (B, 3, 3) - trans -- torch.tensor, size (B, 3) - """ - return face_shape @ rot + trans.unsqueeze(1) - - - def get_landmarks(self, face_proj): - """ - Return: - face_lms -- torch.tensor, size (B, 68, 2) - - Parameters: - face_proj -- torch.tensor, size (B, N, 2) - """ - return face_proj[:, self.keypoints] - - def split_coeff(self, coeffs): - """ - Return: - coeffs_dict -- a dict of torch.tensors - - Parameters: - coeffs -- torch.tensor, size (B, 256) - """ - id_coeffs = coeffs[:, :80] - exp_coeffs = coeffs[:, 80: 144] - tex_coeffs = coeffs[:, 144: 224] - angles = coeffs[:, 224: 227] - gammas = coeffs[:, 227: 254] - translations = coeffs[:, 254:] - return { - 'id': id_coeffs, - 'exp': exp_coeffs, - 'tex': tex_coeffs, - 'angle': angles, - 'gamma': gammas, - 'trans': translations - } - def compute_for_render(self, coeffs): - """ - Return: - face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate - face_color -- torch.tensor, size (B, N, 3), in RGB order - landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction - Parameters: - coeffs -- torch.tensor, size (B, 257) - """ - coef_dict = self.split_coeff(coeffs) - face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp']) - rotation = self.compute_rotation(coef_dict['angle']) - - - face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans']) - face_vertex = self.to_camera(face_shape_transformed) - - face_proj = self.to_image(face_vertex) - landmark = self.get_landmarks(face_proj) - - face_texture = self.compute_texture(coef_dict['tex']) - face_norm = 
self.compute_norm(face_shape) - face_norm_roted = face_norm @ rotation - face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma']) - - return face_vertex, face_texture, face_color, landmark - - def compute_for_render_woRotation(self, coeffs): - """ - Return: - face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate - face_color -- torch.tensor, size (B, N, 3), in RGB order - landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction - Parameters: - coeffs -- torch.tensor, size (B, 257) - """ - coef_dict = self.split_coeff(coeffs) - face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp']) - #rotation = self.compute_rotation(coef_dict['angle']) - - - #face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans']) - face_vertex = self.to_camera(face_shape) - - face_proj = self.to_image(face_vertex) - landmark = self.get_landmarks(face_proj) - - face_texture = self.compute_texture(coef_dict['tex']) - face_norm = self.compute_norm(face_shape) - face_norm_roted = face_norm # @ rotation - face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma']) - - return face_vertex, face_texture, face_color, landmark - - -if __name__ == '__main__': - transferBFM09() \ No newline at end of file diff --git "a/spaces/diacanFperku/AutoGPT/Adobe Universal Patcher (Latest CC 2014) Is Here\302\240!!!.md" "b/spaces/diacanFperku/AutoGPT/Adobe Universal Patcher (Latest CC 2014) Is Here\302\240!!!.md" deleted file mode 100644 index 43155274cc1905f6cf881ab87bea53e133872fef..0000000000000000000000000000000000000000 --- "a/spaces/diacanFperku/AutoGPT/Adobe Universal Patcher (Latest CC 2014) Is Here\302\240!!!.md" +++ /dev/null @@ -1,98 +0,0 @@ - -

              Adobe Universal Patcher (Latest CC 2014) is Here !!!

              -

              Do you want to use the latest Adobe products without paying any fees or subscriptions? Do you want to activate any Adobe CC 2014 product with just a few clicks? If yes, then you need to download Adobe Universal Patcher, an amazing tool created by PainteR that can crack almost all the products of Adobe CC 2014. In this article, we will show you how to download, install, and use Adobe Universal Patcher for CC 2014 and enjoy its full features.

              -

              What is Adobe Universal Patcher?

              -

              Adobe Universal Patcher is a small but powerful tool that can patch any Adobe CC 2014 product with a single click. It works by replacing the original amtlib.dll file with a modified one that can bypass the activation process and unlock all the features of the product. Adobe Universal Patcher can patch almost all the products of Adobe CC 2014, such as Photoshop, Illustrator, InDesign, Premiere Pro, After Effects, Dreamweaver, Muse, Flash, and more. It is compatible with Windows and Mac OS X platforms.

              -

              Adobe Universal Patcher (Latest CC 2014) is Here !!!


              DOWNLOAD »»» https://gohhs.com/2uFVFp



              -

              What are the advantages of using Adobe Universal Patcher?

              -

              By using Adobe Universal Patcher for CC 2014, you can enjoy many advantages that will enhance your experience with Adobe products. Some of the advantages are:

              -
              • You can use any Adobe CC 2014 product for free, without paying any fees or subscriptions.
              • You can use any Adobe CC 2014 product for unlimited time, without any expiration date or trial period.
              • You can use any Adobe CC 2014 product for any purpose, without any restrictions or limitations.
              • You can use any Adobe CC 2014 product for any project, without any compatibility issues or errors.
              • You can use any Adobe CC 2014 product on any platform, without any installation problems or system-requirement issues.
              -

              How to download Adobe Universal Patcher?

              -

              If you want to download Adobe Universal Patcher for CC 2014, you need to follow these steps:

              -
              1. Click on this link to download the Adobe Universal Patcher CC 2014.rar file. This file contains Adobe Universal Patcher for Windows and Mac OS X.
              2. Extract the rar file using software like WinRAR or 7-Zip.
              3. You will get two folders: one for Windows and one for Mac OS X. Choose the folder that matches your operating system.
              4. You have successfully downloaded Adobe Universal Patcher for CC 2014.
              -

              How to install and use Adobe Universal Patcher?

              -

              If you want to install and use Adobe Universal Patcher for CC 2014, you need to follow these steps (taking Photoshop CC 2014 as an example):

              -
              1. Download the setup of Photoshop CC 2014 from this link. This file contains the Photoshop CC 2014 software for Windows and Mac OS X.
              2. Run the setup file and follow the instructions to install Photoshop CC 2014 on your computer.
              3. Choose Try on the first screen and click Sign In in the next window. If you are already signed in on your computer, click Not Your Adobe ID.
              4. It will show an error; click Sign In Later, accept the license terms, and install.
              5. When the setup finishes, click Launch Now. In the next window, click Sign In Later again.
              6. You will get a trial notification. Click Start Trial, wait until Photoshop CC 2014 loads completely, then close it.
              7. Now run the Adobe Universal Patcher for CC 2014 that you downloaded earlier. Click the Patch drop-down list and select Photoshop CC (64 bit) if you have a 64-bit system, or Photoshop CC if you have a 32-bit system.
              8. Click the Patch button and browse to the installation path of Photoshop CC 2014 - ex: C:\Program Files\Adobe\Adobe Photoshop CC 2014. Select the amtlib.dll file and click Open.
              9. Wait until patching finishes and close the patcher.
              10. You have successfully installed and used Adobe Universal Patcher for CC 2014. You can now launch Photoshop CC 2014 and enjoy its full features.
              -

              Conclusion

              -

              Adobe Universal Patcher is a wonderful tool that can crack any Adobe CC 2014 product with ease. It allows you to use the latest Adobe products without any limitations or costs. You can download, install, and use Adobe Universal Patcher for CC 2014 from this article and enjoy its full features. However, we recommend that you buy the original software from Adobe if you can afford it, as it will support the developers and ensure future updates and improvements.

              -

              How to update Adobe CC 2014 products with Adobe Universal Patcher?

              -

              If you want to update your Adobe CC 2014 products with Adobe Universal Patcher, you need to follow these steps:

              -
              1. Download the Adobe Offline Update installer 7.1 from this link. This tool helps you install the latest updates for your Adobe CC 2014 products without undoing the patch.
              2. Run the tool and select the product that you want to update from the list.
              3. Click the Download and Install button and wait for the process to complete.
              4. You have successfully updated your Adobe CC 2014 product with Adobe Universal Patcher.
              -

              How to use XFORCE keygen for Adobe CC 2014 products?

              -

              If you are a Mac user or if the Adobe Universal Patcher doesn't work for you, you can use XFORCE keygen for Adobe CC 2014 products. XFORCE keygen is another tool that can generate serial keys and activation codes for any Adobe CC 2014 product. To use XFORCE keygen for Adobe CC 2014 products, you need to follow these steps:

              -
              1. Download the XFORCE keygen for Windows or Mac OS X from this link.
              2. Extract the zip file and run the keygen as administrator (Windows) or with sudo (Mac OS X).
              3. Select the product that you want to activate from the drop-down list.
              4. Click the Generate button and copy the serial number.
              5. Run the setup of your Adobe CC 2014 product and enter the serial number when prompted.
              6. When the installation is finished, run the product and click Activate.
              7. Select Offline Activation and click Generate Request Code.
              8. Copy the request code and paste it into the keygen.
              9. Click Generate Activation Code and copy it.
              10. Paste the activation code into the product and click Activate.
              11. You have successfully activated your Adobe CC 2014 product with XFORCE keygen.
              -

              How to fix common issues with Adobe Universal Patcher?

              -

              Although Adobe Universal Patcher is a reliable and effective tool, you may occasionally run into issues or errors while using it. Here are some common issues and their solutions, followed by a short script you can use to check which of the runtime files mentioned below are present:

              -
              • If you get an error message saying "The application was unable to start correctly (0xc000007b). Click OK to close the application.", you need to install the Microsoft Visual C++ Redistributable Packages for Visual Studio 2013 from this link.
              • If you get an error message saying "The program can't start because api-ms-win-crt-runtime-l1-1-0.dll is missing from your computer. Try reinstalling the program to fix this problem.", you need to install the Windows Update KB2999226 from this link.
              • If you get an error message saying "The program can't start because MSVCR120.dll is missing from your computer. Try reinstalling the program to fix this problem.", you need to install the Microsoft Visual C++ Redistributable Packages for Visual Studio 2013 from this link.
              • If you get an error message saying "The program can't start because MSVCP120.dll is missing from your computer. Try reinstalling the program to fix this problem.", you need to install the Microsoft Visual C++ Redistributable Packages for Visual Studio 2013 from this link.
              • If you get an error message saying "The program can't start because VCRUNTIME140.dll is missing from your computer. Try reinstalling the program to fix this problem.", you need to install the Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017 and 2019 from this link.
              • If you get an error message saying "The program can't start because MSVCP140.dll is missing from your computer. Try reinstalling the program to fix this problem.", you need to install the Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017 and 2019 from this link.
              • If you get an error message saying "This patch is not applicable for you. Please check for updates from Help>Updates menu in your product.", you need to make sure that you have selected the correct product and version from the patch drop-down list.
              • If you get an error message saying "This patch does not support your version of product. Please update your product or use another patch.", you need to make sure that you have updated your product to the latest version or use another patch that supports your version.
              • If you get an error message saying "This patch does not support your platform. Please use another patch.", you need to make sure that you have downloaded and run the correct patch for your operating system (Windows or Mac OS X).
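
              If you are not sure which runtime is actually missing, you can check before reinstalling anything. The following is a minimal Python sketch (not part of Adobe Universal Patcher; the DLL names are simply taken from the error messages above) that reports which of these runtime files are present in the standard Windows system directories:

              import os

              # DLL names taken from the error messages listed above.
              RUNTIME_DLLS = [
                  "api-ms-win-crt-runtime-l1-1-0.dll",
                  "msvcr120.dll",
                  "msvcp120.dll",
                  "vcruntime140.dll",
                  "msvcp140.dll",
              ]

              windir = os.environ.get("WINDIR", r"C:\Windows")
              system_dirs = [os.path.join(windir, "System32"), os.path.join(windir, "SysWOW64")]

              for dll in RUNTIME_DLLS:
                  # A DLL counts as present if it exists in either system directory.
                  found = any(os.path.isfile(os.path.join(d, dll)) for d in system_dirs)
                  print(f"{dll}: {'found' if found else 'missing'}")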
              -

              How to contact Adobe Universal Patcher support?

              -

              If you have any questions, feedback, or suggestions regarding Adobe Universal Patcher, you can contact its developer PainteR through his email address: painter@adobe.com. You can also visit his website: https://painter.ru/ for more information and updates about his work.

              -

              How to download Adobe CC 2014 products?

              -

              If you don't have the setup files of Adobe CC 2014 products, you can download them from this link. This link contains the direct download links for all Adobe CC 2014 products for Windows and Mac OS X. You can choose the product that you want to download and install from the list. You will need an Adobe account to access these links, but you don't need to pay anything or subscribe to any plan.

              -

              How to back up and restore Adobe CC 2014 products?

              -

              If you want to back up and restore your Adobe CC 2014 products, you can use the Adobe Creative Cloud Desktop app. This app allows you to manage, update, and sync your Adobe CC 2014 products across multiple devices. You can also use it to back up and restore your preferences, settings, presets, libraries, and more. To back up and restore your Adobe CC 2014 products, follow these steps:

              -
              1. Launch the Adobe Creative Cloud Desktop app and sign in with your Adobe account.
              2. Click the gear icon at the top-right corner and select Preferences.
              3. Click Creative Cloud and then select Syncing.
              4. Check the box next to Sync Settings and choose what you want to sync (Preferences, Presets, Libraries, etc.).
              5. Click Done and wait for the sync process to complete.
              6. You have successfully backed up your Adobe CC 2014 products.
              7. To restore your Adobe CC 2014 products, repeat the same steps on another device or after reinstalling your products.
              8. You have successfully restored your Adobe CC 2014 products.
              -

              Conclusion

              -

              In this article, we have shown you how to download, install, and use Adobe Universal Patcher for CC 2014, a powerful tool that can crack any Adobe CC 2014 product with a single click. We have also explained how to update, backup, and restore your Adobe CC 2014 products with Adobe Universal Patcher. By using this tool, you can enjoy the latest Adobe products without any limitations or costs. However, we recommend that you buy the original software from Adobe if you can afford it, as it will support the developers and ensure future updates and improvements.

              -
              -
              \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Airparrot 1.0.4 Serial Number Maker.md b/spaces/diacanFperku/AutoGPT/Airparrot 1.0.4 Serial Number Maker.md deleted file mode 100644 index 31bf2e8c3a42c5f5efe299ddcfd02a45a7ba974c..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Airparrot 1.0.4 Serial Number Maker.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Airparrot 1.0.4 Serial Number Maker


              DOWNLOAD >> https://gohhs.com/2uFUHh



              - -RadiAnt DICOM Viewer v.1.0.4 crack (Open and display DICOM files.) ... MediaHuman YouTube Downloader v.2.1 serial number maker (Download and save ... AirParrot v.1.0.3 serials key (Watch videos, view images, and mirror your desktop.) ...
              -
              -
              -

              diff --git a/spaces/diacanFperku/AutoGPT/Eltima USB Network Gate 8.1.2013 Activator 6 MB !!LINK!!.md b/spaces/diacanFperku/AutoGPT/Eltima USB Network Gate 8.1.2013 Activator 6 MB !!LINK!!.md deleted file mode 100644 index 27c30a7a9fcc8caca9d3e74e1d74235aec694caa..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Eltima USB Network Gate 8.1.2013 Activator 6 MB !!LINK!!.md +++ /dev/null @@ -1,9 +0,0 @@ -
              -

              When you install Windows 10, your computer installs new drivers for built-in devices, Windows also installs new drivers and patches for your USB devices. These are referred to as "layers" in the USB protocol. Standard systems will install USB drivers that support all the necessary layers for your USB devices. Windows comes with a driver called the Universal Serial Bus Driver (USB) that provides basic USB support for many USB devices. If you have a legacy USB device, you may need to install a more specific driver. For USB Network Gate (10 shared USB devices) you will need a driver that includes support for the following layers:

              -

              Connecting the USB Network Gate to Windows 10

              Before you plug the USB Network Gate (10 shared USB devices) into your computer, you must prepare your computer for successful sharing. Follow these steps to configure Windows for sharing:

              -

              Eltima USB Network Gate 8.1.2013 Activator | 6 MB


              Download: https://gohhs.com/2uFUbn



              -

              Eltima USB Network Gate for Mac comes with a free mobile app (version for iOS and Android) that lets you keep a virtual dongle on your mobile device and browse, share and synchronize the USB resource over the Internet.

              -

              The device supports 12KB/sec upload and 50KB/sec download, supporting both USB 1.1 and USB 2.0 devices. It operates on both WiFi and Ethernet connections. To verify the network connection, use the F5 button that appears on the Mac interface. You can also select the network connection by pressing the FN button.
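
              Beyond the F5 check in the interface, you can also confirm from a script that a shared device actually enumerated on the client machine. A minimal Python sketch, assuming the third-party pyusb package and a libusb backend are installed (this is not part of USB Network Gate itself):

              import usb.core  # third-party pyusb package; requires a libusb backend

              # List every USB device the operating system currently sees, so you can
              # confirm that a device shared over the network actually showed up.
              for dev in usb.core.find(find_all=True):
                  print(f"ID {dev.idVendor:04x}:{dev.idProduct:04x}")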

              -

              If your network connection uses the Dynamic Host Configuration Protocol (DHCP), a DHCP server is required for Smart Mode to work. Using a Dynamic IP address allows for network connection changes without the need to manually enter the IP address. For an Ethernet connection, the IP address is usually given by the DHCP server.
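
              For example, if another machine needs to reach this one and the address is DHCP-assigned, a small script can look up the current address instead of you reading it off the adapter settings each time. A minimal Python sketch (the 8.8.8.8 target is arbitrary, and nothing is actually sent):

              import socket

              def current_ip() -> str:
                  # "Connecting" a UDP socket only selects a route and local address;
                  # no packets are transmitted.
                  s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                  try:
                      s.connect(("8.8.8.8", 80))
                      return s.getsockname()[0]
                  finally:
                      s.close()

              print(current_ip())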

              -
              -
              \ No newline at end of file diff --git a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/app.py b/spaces/digitalxingtong/Jiuxia-Bert-Vits2/app.py deleted file mode 100644 index 831dbd74f742501ee0d476f201017deb7a1b61f0..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/app.py +++ /dev/null @@ -1,183 +0,0 @@ -import sys, os - -if sys.platform == "darwin": - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -import logging - -logging.getLogger("numba").setLevel(logging.WARNING) -logging.getLogger("markdown_it").setLevel(logging.WARNING) -logging.getLogger("urllib3").setLevel(logging.WARNING) -logging.getLogger("matplotlib").setLevel(logging.WARNING) - -logging.basicConfig(level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s") - -logger = logging.getLogger(__name__) - -import torch -import argparse -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -import gradio as gr -import webbrowser - - -net_g = None - - -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - del word2ph - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language -import soundfile as sf -def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid): - global net_g - bert, phones, tones, lang_ids = get_text(text, "ZH", hps) - with torch.no_grad(): - x_tst=phones.to(device).unsqueeze(0) - tones=tones.to(device).unsqueeze(0) - lang_ids=lang_ids.to(device).unsqueeze(0) - bert = bert.to(device).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device) - del phones - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids, bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers - sf.write("tmp.wav", audio, 44100) - return audio -def convert_wav_to_ogg(wav_file): - os.makedirs('out', exist_ok=True) - filename = os.path.splitext(os.path.basename(wav_file.name))[0] - output_path_ogg = os.path.join('out', f"out.ogg") - - renamed_input_path = os.path.join('in', f"in.wav") - os.makedirs('in', exist_ok=True) - os.rename(wav_file.name, renamed_input_path) - command = ["ffmpeg", "-i", renamed_input_path, "-acodec", "libopus", "-y", output_path_ogg] - os.system(" ".join(command)) - return output_path_ogg -def tts_fn(text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale): - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale, sid=speaker) - with open('tmp.wav', 'rb') as wav_file: - newogg = convert_wav_to_ogg(wav_file) - return "Success", (hps.data.sampling_rate, audio),newogg - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - 
parser.add_argument("--model_dir", default="./logs/jiuxia/jiuxia.pth", help="path of your model") - parser.add_argument("--config_dir", default="./configs/config.json", help="path of your config file") - parser.add_argument("--share", default=False, help="make link public") - parser.add_argument("-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log") - - args = parser.parse_args() - if args.debug: - logger.info("Enable DEBUG-LEVEL log") - logging.basicConfig(level=logging.DEBUG) - hps = utils.get_hparams_from_file(args.config_dir) - device = "cuda:0" if torch.cuda.is_available() else "cpu" - ''' - device = ( - "cuda:0" - if torch.cuda.is_available() - else ( - "mps" - if sys.platform == "darwin" and torch.backends.mps.is_available() - else "cpu" - ) - ) - ''' - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(device) - _ = net_g.eval() - - _ = utils.load_checkpoint(args.model_dir, net_g, None, skip_optimizer=True) - - speaker_ids = hps.data.spk2id - speakers = list(speaker_ids.keys()) - with gr.Blocks() as app: - with gr.Row(): - with gr.Column(): - - - gr.Markdown(value=""" - 九夏 Bert-Vits2在线语音生成\n - 1、模型作者:数字星瞳企划 https://t.me/xingtong25680 \n - \n - 2、原项目地址:https://github.com/Stardust-minus/Bert-VITS2\n - 3、使用此模型进行二创请注明AI生成,以及该项目地址。\n - 4、如果想生成超长txt文本的音频请使用colab。 https://colab.research.google.com/drive/13ek8_j1aknr-pbjj3NXxSM4vBIsracU3?usp=drive_link\n - - """) - text = gr.TextArea(label="Text", placeholder="Input Text Here", - value="这里是数字星瞳企画,请在电报搜索星瞳全拼加二五六八零,获取最新更新进展。") - speaker = gr.Dropdown(choices=speakers, value=speakers[0], label='Speaker') - sdp_ratio = gr.Slider(minimum=0, maximum=1, value=0.2, step=0.01, label='语调变化') - noise_scale = gr.Slider(minimum=0.1, maximum=1.5, value=0.6, step=0.01, label='感情变化') - noise_scale_w = gr.Slider(minimum=0.1, maximum=1.4, value=0.8, step=0.01, label='音节发音长度变化') - length_scale = gr.Slider(minimum=0.1, maximum=2, value=1, step=0.01, label='语速') - btn = gr.Button("开启AI语音之旅吧!", variant="primary") - with gr.Column(): - text_output = gr.Textbox(label="Message") - audio_output = gr.Audio(label="Output Audio") - ogg_output = gr.File(label="Converted OGG file") - gr.Markdown(value=""" - 模型汇总:\n - 星瞳 https://huggingface.co/spaces/digitalxingtong/Xingtong-Bert-Vits2 \n - 星瞳 朗读专用 https://huggingface.co/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2 \n - 星瞳 长文本专用 https://huggingface.co/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2 \n - 甜甜叫花鸡 https://huggingface.co/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2 \n - 七海 https://huggingface.co/spaces/digitalxingtong/Nanami-Bert-Vits2 \n - 东雪莲 https://huggingface.co/spaces/digitalxingtong/Azuma-Bert-Vits2 \n - 嘉然 https://huggingface.co/spaces/digitalxingtong/Jiaran-Bert-Vits2 \n - 乃琳 https://huggingface.co/spaces/digitalxingtong/Eileen-Bert-Vits2 \n - 恬豆 https://huggingface.co/spaces/digitalxingtong/Dou-Bert-Vits2 \n - 奶绿 杂谈 https://huggingface.co/spaces/digitalxingtong/Nailv-Bert-Vits2 \n - 奶绿 朗读 https://huggingface.co/spaces/digitalxingtong/Nailv-read-Bert-Vits2 \n - 露早 https://huggingface.co/spaces/digitalxingtong/Luzao-Bert-Vits2 \n - 柚恩 https://huggingface.co/spaces/digitalxingtong/Un-Bert-Vits2 \n - 米诺 https://huggingface.co/spaces/digitalxingtong/Minuo-Bert-Vits2 \n - 扇宝 https://huggingface.co/spaces/digitalxingtong/Shanbao-Bert-Vits2 \n - 牧牧白 https://huggingface.co/spaces/digitalxingtong/Miiu-Bert-Vits2 \n - 吉诺儿kino 
https://huggingface.co/spaces/digitalxingtong/Kino-Bert-Vits2 \n - 九夏 https://huggingface.co/spaces/digitalxingtong/Jiuxia-Bert-Vits2 \n - 卡缇娅 https://huggingface.co/spaces/digitalxingtong/Yaya-Bert-Vits2 \n - 理想_ideal https://huggingface.co/spaces/digitalxingtong/Lixiang-Bert-Vits2 \n - 阿梓 https://huggingface.co/spaces/digitalxingtong/Azusa-Bert-Vits2 \n - 鹿鸣 https://huggingface.co/spaces/digitalxingtong/Luming-Bert-Vits2 \n - 永雏塔菲 https://huggingface.co/spaces/digitalxingtong/Taffy-Bert-VITS2 \n - """) - btn.click(tts_fn, - inputs=[text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale], - outputs=[text_output, audio_output,ogg_output]) - - - app.launch(show_error=True) diff --git a/spaces/dineshreddy/WALT/walt/datasets/pipelines/__init__.py b/spaces/dineshreddy/WALT/walt/datasets/pipelines/__init__.py deleted file mode 100644 index c6f424debd1623e7511dd77da464a6639d816745..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/walt/datasets/pipelines/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform, - ContrastTransform, EqualizeTransform, Rotate, Shear, - Translate) -from .compose import Compose -from .formating import (Collect, DefaultFormatBundle, ImageToTensor, - ToDataContainer, ToTensor, Transpose, to_tensor) -from .instaboost import InstaBoost -from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam, - LoadMultiChannelImageFromFiles, LoadProposals) -from .test_time_aug import MultiScaleFlipAug -from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize, - Pad, PhotoMetricDistortion, RandomCenterCropPad, - RandomCrop, RandomFlip, Resize, SegRescale) - -__all__ = [ - 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', - 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations', - 'LoadImageFromFile', 'LoadImageFromWebcam', - 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug', - 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale', - 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu', - 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear', - 'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform', - 'ContrastTransform', 'Translate' -] diff --git a/spaces/edemgold/Tone-Transfer/app.py b/spaces/edemgold/Tone-Transfer/app.py deleted file mode 100644 index 63ae688a84c45c6c8f9e1fa4d24eee3c2b77aee8..0000000000000000000000000000000000000000 --- a/spaces/edemgold/Tone-Transfer/app.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- -#Importing dependancies -from styleformer import Styleformer -import gradio as gr -import torch -import warnings -warnings.filterwarnings("ignore") - -def set_seed(seed): - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(seed) - -set_seed(1234) - -#Casual-Formal -sf_0 = Styleformer(style=0) - -#Formal-Casual -sf_1 = Styleformer(style=1) - -#Active-Passive -sf_2 = Styleformer(style=2) - -#Passive-Active -sf_3 = Styleformer(style=3) - -def func(text, tone): - if tone=="Casual-Formal": - return sf_0.transfer(text) - if tone=="Formal-Casual": - return sf_1.transfer(text) - if tone=="Active-Passive": - return sf_2.transfer(text) - if tone=="Passive-Active": - return sf_3.transfer(text) - else: - return "No available Transfers😭" - -#Initalizing Gradio App -app_description = "This model transforms the tone of text, from formal to informal, from Active to Passive. 
Choose your option below." -app_title = "Tone Transfer" - -app = gr.Interface(func,["text",gr.inputs.Radio(["Casual-Formal", "Formal-Casual", "Active-Passive","Passive-Active"])],"text",description=app_description, title=app_title) - -app.launch() \ No newline at end of file diff --git a/spaces/enesbol/case_dif/main.py b/spaces/enesbol/case_dif/main.py deleted file mode 100644 index 5485cbab9cc8e2a51e7dad75eda1f8b94f585cb0..0000000000000000000000000000000000000000 --- a/spaces/enesbol/case_dif/main.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import pprint -import random -import warnings -import torch -import numpy as np -from trainer import Trainer, Tester -from inference import Inference - -from config import getConfig -warnings.filterwarnings('ignore') -args = getConfig() - - -def main(args): - print('<---- Training Params ---->') - pprint.pprint(args) - - # Random Seed - seed = args.seed - os.environ['PYTHONHASHSEED'] = str(seed) - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) # if use multi-GPU - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - if args.action == 'train': - save_path = os.path.join(args.model_path, args.dataset, f'TE{args.arch}_{str(args.exp_num)}') - - # Create model directory - os.makedirs(save_path, exist_ok=True) - Trainer(args, save_path) - - elif args.action == 'test': - save_path = os.path.join(args.model_path, args.dataset, f'TE{args.arch}_{str(args.exp_num)}') - datasets = ['DUTS', 'DUT-O', 'HKU-IS', 'ECSSD', 'PASCAL-S'] - - for dataset in datasets: - args.dataset = dataset - test_loss, test_mae, test_maxf, test_avgf, test_s_m = Tester(args, save_path).test() - - print(f'Test Loss:{test_loss:.3f} | MAX_F:{test_maxf:.4f} ' - f'| AVG_F:{test_avgf:.4f} | MAE:{test_mae:.4f} | S_Measure:{test_s_m:.4f}') - else: - save_path = os.path.join(args.model_path, args.dataset, f'TE{args.arch}_{str(args.exp_num)}') - - print('<----- Initializing inference mode ----->') - Inference(args, save_path).test() - - -if __name__ == '__main__': - main(args) \ No newline at end of file diff --git a/spaces/ennet/ChatDev/online_log/static/css/style.css b/spaces/ennet/ChatDev/online_log/static/css/style.css deleted file mode 100644 index 1595bba089d173d3eec4bfb1f2788a1c61a77cb6..0000000000000000000000000000000000000000 --- a/spaces/ennet/ChatDev/online_log/static/css/style.css +++ /dev/null @@ -1,126 +0,0 @@ -.container { - margin-top: 20px; - margin-bottom: 20px; - width: 2500px; - height: 700px; - border: 1px solid black; - overflow-y: scroll; - background-color: white; - position: relative; - scroll-behavior: smooth; -} - -.container::-webkit-scrollbar { - width: 0; - background-color: transparent; -} - -.message-container { - position: relative; - display: flex; - margin: 20px; - max-width: 95%; - word-wrap: break-word; - padding-top: 0px; /* Add space for the button */ -} - - -.message-text { - background-color: lightgray; - border-radius: 10px; - padding: 8px; - margin-left: 40px; - font-size: 10px; - width: 100%; -} - -.avatar { - width: 40px; - height: 40px; - border-radius: 50%; - position: absolute; - top: 0px; - left: -30px; - margin-left: 20px; - background-color: green; - background-size: cover; -} - -.role { - font-size: 12px; - font-weight: bold; - position: absolute; - bottom: 0; - top: -30px; - margin-top: 10px; - margin-left: 40px; -} - -.code-block pre { - margin: 0; -} - -.dark { - color: #000000; -} - -.line-numbers .line-numbers-rows { - 
border-right-color: #44475a; - display: block; /* Add this to ensure line numbers are displayed */ -} - -.copy-button { - position: absolute; - top: 1px; - right: 0px; - background-color: #7a7c7f; - color: #f8f8f2; - border: none; - padding: 5px 10px; - border-radius: 4px; - cursor: pointer; -} - -.code-block-header { - background-color: #5b5656; - color: #ffffff; - padding: 5px; - font-size: 14px; - font-weight: bold; -} - - -.code-block { - background-color: #000000 !important; - border-radius: 4px; - margin-top: 10px; - position: relative; - overflow: hidden; - color: #000000; /* Add text color for syntax highlighting */ -} - -table { - border-collapse: collapse; - width: 100%; -} - -th, td { - border: 1px solid black; - padding: 8px; - text-align: left; - white-space: pre-line; -} - -.expand-button { - position: absolute; - top: 2px; - right: 2px; - border-radius: 4px; - background-color: transparent; - border: none; - padding: 5px; - cursor: pointer; - font-size: 8px; - font-weight: bold; - color: rgb(108, 99, 99); -} diff --git a/spaces/esb/leaderboard/README.md b/spaces/esb/leaderboard/README.md deleted file mode 100644 index 4dcbaf467f5694144a64b5ff157e90d215196004..0000000000000000000000000000000000000000 --- a/spaces/esb/leaderboard/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: leaderboard -emoji: 🏆 -colorFrom: indigo -colorTo: gray -sdk: streamlit -sdk_version: 1.26.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/evaluate-metric/squad_v2/compute_score.py b/spaces/evaluate-metric/squad_v2/compute_score.py deleted file mode 100644 index 3b512ae92484772a8d10bdf43f4df68520cdf3ed..0000000000000000000000000000000000000000 --- a/spaces/evaluate-metric/squad_v2/compute_score.py +++ /dev/null @@ -1,323 +0,0 @@ -"""Official evaluation script for SQuAD version 2.0. - -In addition to basic functionality, we also compute additional statistics and -plot precision-recall curves if an additional na_prob.json file is provided. -This file is expected to map question ID's to the model's predicted probability -that a question is unanswerable. -""" -import argparse -import collections -import json -import os -import re -import string -import sys - -import numpy as np - - -ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE) - -OPTS = None - - -def parse_args(): - parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.") - parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.") - parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.") - parser.add_argument( - "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)." - ) - parser.add_argument( - "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer." - ) - parser.add_argument( - "--na-prob-thresh", - "-t", - type=float, - default=1.0, - help='Predict "" if no-answer probability exceeds this (default = 1.0).', - ) - parser.add_argument( - "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory." 
- ) - parser.add_argument("--verbose", "-v", action="store_true") - if len(sys.argv) == 1: - parser.print_help() - sys.exit(1) - return parser.parse_args() - - -def make_qid_to_has_ans(dataset): - qid_to_has_ans = {} - for article in dataset: - for p in article["paragraphs"]: - for qa in p["qas"]: - qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"]) - return qid_to_has_ans - - -def normalize_answer(s): - """Lower text and remove punctuation, articles and extra whitespace.""" - - def remove_articles(text): - return ARTICLES_REGEX.sub(" ", text) - - def white_space_fix(text): - return " ".join(text.split()) - - def remove_punc(text): - exclude = set(string.punctuation) - return "".join(ch for ch in text if ch not in exclude) - - def lower(text): - return text.lower() - - return white_space_fix(remove_articles(remove_punc(lower(s)))) - - -def get_tokens(s): - if not s: - return [] - return normalize_answer(s).split() - - -def compute_exact(a_gold, a_pred): - return int(normalize_answer(a_gold) == normalize_answer(a_pred)) - - -def compute_f1(a_gold, a_pred): - gold_toks = get_tokens(a_gold) - pred_toks = get_tokens(a_pred) - common = collections.Counter(gold_toks) & collections.Counter(pred_toks) - num_same = sum(common.values()) - if len(gold_toks) == 0 or len(pred_toks) == 0: - # If either is no-answer, then F1 is 1 if they agree, 0 otherwise - return int(gold_toks == pred_toks) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(pred_toks) - recall = 1.0 * num_same / len(gold_toks) - f1 = (2 * precision * recall) / (precision + recall) - return f1 - - -def get_raw_scores(dataset, preds): - exact_scores = {} - f1_scores = {} - for article in dataset: - for p in article["paragraphs"]: - for qa in p["qas"]: - qid = qa["id"] - gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)] - if not gold_answers: - # For unanswerable questions, only correct answer is empty string - gold_answers = [""] - if qid not in preds: - print(f"Missing prediction for {qid}") - continue - a_pred = preds[qid] - # Take max over all gold answers - exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) - f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) - return exact_scores, f1_scores - - -def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): - new_scores = {} - for qid, s in scores.items(): - pred_na = na_probs[qid] > na_prob_thresh - if pred_na: - new_scores[qid] = float(not qid_to_has_ans[qid]) - else: - new_scores[qid] = s - return new_scores - - -def make_eval_dict(exact_scores, f1_scores, qid_list=None): - if not qid_list: - total = len(exact_scores) - return collections.OrderedDict( - [ - ("exact", 100.0 * sum(exact_scores.values()) / total), - ("f1", 100.0 * sum(f1_scores.values()) / total), - ("total", total), - ] - ) - else: - total = len(qid_list) - return collections.OrderedDict( - [ - ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total), - ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total), - ("total", total), - ] - ) - - -def merge_eval(main_eval, new_eval, prefix): - for k in new_eval: - main_eval[f"{prefix}_{k}"] = new_eval[k] - - -def plot_pr_curve(precisions, recalls, out_image, title): - plt.step(recalls, precisions, color="b", alpha=0.2, where="post") - plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b") - plt.xlabel("Recall") - plt.ylabel("Precision") - plt.xlim([0.0, 1.05]) - plt.ylim([0.0, 1.05]) - plt.title(title) - plt.savefig(out_image) - plt.clf() - - -def 
make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None): - qid_list = sorted(na_probs, key=lambda k: na_probs[k]) - true_pos = 0.0 - cur_p = 1.0 - cur_r = 0.0 - precisions = [1.0] - recalls = [0.0] - avg_prec = 0.0 - for i, qid in enumerate(qid_list): - if qid_to_has_ans[qid]: - true_pos += scores[qid] - cur_p = true_pos / float(i + 1) - cur_r = true_pos / float(num_true_pos) - if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: - # i.e., if we can put a threshold after this point - avg_prec += cur_p * (cur_r - recalls[-1]) - precisions.append(cur_p) - recalls.append(cur_r) - if out_image: - plot_pr_curve(precisions, recalls, out_image, title) - return {"ap": 100.0 * avg_prec} - - -def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir): - if out_image_dir and not os.path.exists(out_image_dir): - os.makedirs(out_image_dir) - num_true_pos = sum(1 for v in qid_to_has_ans.values() if v) - if num_true_pos == 0: - return - pr_exact = make_precision_recall_eval( - exact_raw, - na_probs, - num_true_pos, - qid_to_has_ans, - out_image=os.path.join(out_image_dir, "pr_exact.png"), - title="Precision-Recall curve for Exact Match score", - ) - pr_f1 = make_precision_recall_eval( - f1_raw, - na_probs, - num_true_pos, - qid_to_has_ans, - out_image=os.path.join(out_image_dir, "pr_f1.png"), - title="Precision-Recall curve for F1 score", - ) - oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()} - pr_oracle = make_precision_recall_eval( - oracle_scores, - na_probs, - num_true_pos, - qid_to_has_ans, - out_image=os.path.join(out_image_dir, "pr_oracle.png"), - title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", - ) - merge_eval(main_eval, pr_exact, "pr_exact") - merge_eval(main_eval, pr_f1, "pr_f1") - merge_eval(main_eval, pr_oracle, "pr_oracle") - - -def histogram_na_prob(na_probs, qid_list, image_dir, name): - if not qid_list: - return - x = [na_probs[k] for k in qid_list] - weights = np.ones_like(x) / float(len(x)) - plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0)) - plt.xlabel("Model probability of no-answer") - plt.ylabel("Proportion of dataset") - plt.title(f"Histogram of no-answer probability: {name}") - plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png")) - plt.clf() - - -def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): - num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) - cur_score = num_no_ans - best_score = cur_score - best_thresh = 0.0 - qid_list = sorted(na_probs, key=lambda k: na_probs[k]) - for i, qid in enumerate(qid_list): - if qid not in scores: - continue - if qid_to_has_ans[qid]: - diff = scores[qid] - else: - if preds[qid]: - diff = -1 - else: - diff = 0 - cur_score += diff - if cur_score > best_score: - best_score = cur_score - best_thresh = na_probs[qid] - return 100.0 * best_score / len(scores), best_thresh - - -def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): - best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) - best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) - main_eval["best_exact"] = best_exact - main_eval["best_exact_thresh"] = exact_thresh - main_eval["best_f1"] = best_f1 - main_eval["best_f1_thresh"] = f1_thresh - - -def main(): - with open(OPTS.data_file) as f: - dataset_json = json.load(f) - dataset = dataset_json["data"] - with open(OPTS.pred_file) as f: - preds = 
json.load(f) - if OPTS.na_prob_file: - with open(OPTS.na_prob_file) as f: - na_probs = json.load(f) - else: - na_probs = {k: 0.0 for k in preds} - qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False - has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] - no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] - exact_raw, f1_raw = get_raw_scores(dataset, preds) - exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) - f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh) - out_eval = make_eval_dict(exact_thresh, f1_thresh) - if has_ans_qids: - has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids) - merge_eval(out_eval, has_ans_eval, "HasAns") - if no_ans_qids: - no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids) - merge_eval(out_eval, no_ans_eval, "NoAns") - if OPTS.na_prob_file: - find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) - if OPTS.na_prob_file and OPTS.out_image_dir: - run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir) - histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns") - histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns") - if OPTS.out_file: - with open(OPTS.out_file, "w") as f: - json.dump(out_eval, f) - else: - print(json.dumps(out_eval, indent=2)) - - -if __name__ == "__main__": - OPTS = parse_args() - if OPTS.out_image_dir: - import matplotlib - - matplotlib.use("Agg") - import matplotlib.pyplot as plt - main() diff --git a/spaces/falterWliame/Face_Mask_Detection/Dalet Radio Suite.rar.md b/spaces/falterWliame/Face_Mask_Detection/Dalet Radio Suite.rar.md deleted file mode 100644 index 9dc5069fda092efdf799d5f4c1c9a2d4b9bdaf30..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Dalet Radio Suite.rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Dalet Radio Suite.rar


Download Zip: https://urlca.com/2uDdGP



              -
              -
              -
              -

              diff --git a/spaces/falterWliame/Face_Mask_Detection/Manualpolyboard4castellano3 !LINK!.md b/spaces/falterWliame/Face_Mask_Detection/Manualpolyboard4castellano3 !LINK!.md deleted file mode 100644 index 12b3a275e9515cc1d807fb18b363c267ea85930a..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Manualpolyboard4castellano3 !LINK!.md +++ /dev/null @@ -1,198 +0,0 @@ - -

              Manualpolyboard4castellano3: Everything You Need to Know About Your Poly CCX Business Media Phone

              - -

              Are you looking for a user manual that can help you set up, use, and troubleshoot your Poly CCX 400, 500, 600, or 700 business media phone? If so, you are in luck. Manualpolyboard4castellano3 is a PDF document that contains all the information you need to get the most out of your phone. Whether you want to make audio calls, transfer calls, record calls, park and retrieve calls, or use advanced features like Broadsoft UC-One and Remote Office, Manualpolyboard4castellano3 has got you covered. In this article, we will give you a brief overview of Manualpolyboard4castellano3 and how to download it for free.

              - -

              What is Manualpolyboard4castellano3?

              - -

Manualpolyboard4castellano3 is a user manual that provides detailed instructions on how to use your Poly CCX business media phone with OpenSIP. Running the phone in OpenSIP mode means it uses the open, standard SIP protocol, so you can register it with any compatible SIP server or service provider. With OpenSIP, you can enjoy a variety of features and functions on your phone.

              -

              Manualpolyboard4castellano3


              Download File ►►► https://urlca.com/2uDdIC



              - -

              Manualpolyboard4castellano3 covers the following topics:

              - -
                -
              • Before You Begin: This section introduces you to the audience, purpose, and required skills of the manual, as well as the related Poly and partner resources that you can access online.
              • -
              • Getting Started: This section gives you an overview of the CCX phone hardware, LED indicators, navigation, screens, onscreen keyboard, and login and lock features.
              • -
              • Audio Calls: This section explains how to use the handset, headset, or speakerphone to place, answer, hold, resume, transfer, mute, park, retrieve, record, and manage audio calls.
              • -
              • Calling Contacts from Directories: This section shows you how to call a contact from a directory or a directory search.
              • -
              • Placing Intercom Calls: This section teaches you how to place and answer intercom calls.
              • -
              • Call Precedence and Preemption: This section describes how to handle calls with different priority levels.
              • -
              • Managing Calls Remotely with Broadworks Server: This section demonstrates how to use Broadsoft UC-One Broadworks Anywhere and Remote Office features to manage your calls from anywhere.
              • -
              • Ignoring or Rejecting Incoming Calls: This section tells you how to ignore or silence an incoming call, reject incoming calls, automatically reject calls from a contact, reject anonymous calls, and enable Do Not Disturb mode.
              • -
              - -

              Why You Need Manualpolyboard4castellano3?

              - -

              Manualpolyboard4castellano3 is a must-have document for anyone who owns or uses a Poly CCX business media phone. Here are some of the benefits of using Manualpolyboard4castellano3:

              - -
                -
              • You can learn how to set up your phone quickly and easily.
              • -
              • You can discover all the features and functions of your phone and how to use them effectively.
              • -
              • You can troubleshoot any issues or problems that you may encounter with your phone.
              • -
              • You can access additional resources and support from Poly and its partners.
              • -
              - -

              How to Download Manualpolyboard4castellano3?

              - -

              You can download Manualpolyboard4castellano3 for free from the Xiaomi Community website. Xiaomi Community is a platform where Xiaomi users can share their experiences, tips, and feedback about Xiaomi products. To download Manualpolyboard4castellano3, follow these steps:

              - -
                -
              1. Go to https://new.c.mi.com/ng/post/76797/Manualpolyboard4castellano3
              2. -
3. Click on the Download link at the top of the post.
              4. -
              5. Save the PDF file to your computer or device.
              6. -
              7. Open the PDF file with any PDF reader application.
              8. -
              - -

              Conclusion

              - -

              Manualpolyboard4castellano3 is a comprehensive guide for Poly CCX business media phone users who want to make the most of their phones. It provides detailed instructions on how to set up, operate, and troubleshoot your phone with OpenSIP. It also covers advanced features like Broadsoft UC-One and Remote Office. You can download Manualpolyboard4castellano3 for free from the Xiaomi Community website. If you have any questions or feedback about Manualpolyboard4castellano3 or your Poly CCX phone, feel free to contact Poly support or visit their website at https://www.poly.com/us/en/support.

              -

              How to Update Your Phone Software

              - -

              One of the advantages of using a Poly CCX business media phone is that you can always keep your phone software up to date with the latest features and security patches. Manualpolyboard4castellano3 explains how to update your phone software manually or automatically. You can also check the current software version and release date of your phone.

              - -

              To update your phone software manually, follow these steps:

              - -
                -
              1. On your phone, go to Settings > Status > Platform > Phone Information.
              2. -
              3. Check the Software Version and Release Date fields.
              4. -
              5. If there is a newer version available, go to Settings > Basic > Update Configuration.
              6. -
              7. Select Check for Updates.
              8. -
              9. If an update is available, select Install Update.
              10. -
              11. Wait for the update to complete and your phone to restart.
              12. -
              - -

              To update your phone software automatically, follow these steps:

              - -
                -
              1. On your phone, go to Settings > Basic > Update Configuration.
              2. -
              3. Select Automatic Updates.
              4. -
              5. Select On or Off to enable or disable automatic updates.
              6. -
              7. Select Check Interval to set how often your phone checks for updates.
              8. -
              9. Select Install Time to set when your phone installs updates.
              10. -
              - -

              How to Customize Your Phone Settings

              - -

              Another benefit of using a Poly CCX business media phone is that you can customize your phone settings according to your preferences and needs. Manualpolyboard4castellano3 shows you how to change various settings on your phone, such as language, time and date, ringtone, volume, brightness, wallpaper, and more.

              -

              - -

              To change your phone language, follow these steps:

              - -
                -
              1. On your phone, go to Settings > Basic > Preferences.
              2. -
              3. Select Language.
              4. -
              5. Select the language you want to use on your phone.
              6. -
              - -

              To change your time and date settings, follow these steps:

              - -
                -
              1. On your phone, go to Settings > Basic > Preferences.
              2. -
              3. Select Time & Date.
              4. -
              5. Select Time Zone and choose your time zone from the list.
              6. -
              7. Select Time Format and choose 12-hour or 24-hour format.
              8. -
              9. Select Date Format and choose the date format you want to use.
              10. -
              - -

              To change your ringtone settings, follow these steps:

              - -
                -
              1. On your phone, go to Settings > Basic > Ring Type.
              2. -
              3. Select Default Ringtone and choose a ringtone from the list.
              4. -
              5. Select Contact Ringtone and choose a ringtone for a specific contact from the list.
              6. -
              - -

              To change your volume settings, follow these steps:

              - -
                -
              1. On your phone, press the Volume keys on the right side of the phone.
              2. -
              3. Adjust the volume level for the handset, headset, speakerphone, or ringer as needed.
              4. -
              - -

              To change your brightness settings, follow these steps:

              - -
                -
              1. On your phone, go to Settings > Basic > Brightness Level.
              2. -
              3. Adjust the brightness level of the screen as needed.
              4. -
              - -

              To change your wallpaper settings, follow these steps:

              - -
                -
              1. On your phone, go to Settings > Basic > Wallpaper.
              2. -
              3. Select Default Wallpaper and choose a wallpaper from the list.
              4. -
              5. Select Custom Wallpaper and choose a wallpaper from a USB flash drive or an online source.
              6. -
              -

              How to Use Your Phone with Other Devices

              - -

              One of the features of your Poly CCX business media phone is that you can use it with other devices, such as a USB flash drive, a Bluetooth headset, or a computer. Manualpolyboard4castellano3 explains how to connect your phone to these devices and how to use them with your phone.

              - -

              To use a USB flash drive with your phone, follow these steps:

              - -
                -
              1. Insert the USB flash drive into the USB port on the back of your phone.
              2. -
              3. On your phone, go to Settings > Basic > USB Configuration.
              4. -
              5. Select USB Mode and choose Mass Storage Device.
              6. -
              7. Select File Manager and browse the files on the USB flash drive.
              8. -
              9. You can view, copy, delete, or rename the files on the USB flash drive.
              10. -
              11. You can also use the USB flash drive to import or export contacts, update your phone software, or change your wallpaper.
              12. -
              - -

              To use a Bluetooth headset with your phone, follow these steps:

              - -
                -
              1. On your phone, go to Settings > Basic > Bluetooth Settings.
              2. -
              3. Select Bluetooth and turn it on.
              4. -
              5. Select Add Bluetooth Device and put your headset in pairing mode.
              6. -
              7. Select your headset from the list of available devices and enter the PIN code if required.
              8. -
              9. Your phone will pair with your headset and show a Bluetooth icon on the status bar.
              10. -
              11. You can use your headset to make and receive calls on your phone.
              12. -
              - -

              To use your computer with your phone, follow these steps:

              - -
                -
              1. Connect your computer to your phone using a USB cable or a Wi-Fi network.
              2. -
              3. On your phone, go to Settings > Basic > USB Configuration.
              4. -
              5. Select USB Mode and choose Ethernet Emulation Mode (EEM).
              6. -
              7. Your phone will act as a network adapter for your computer and share its internet connection.
              8. -
              9. You can also use Polycom BToE Connector software on your computer to control your phone from your computer.
              10. -
              - -

              How to Troubleshoot Your Phone

              - -

              Sometimes, you may encounter some issues or problems with your Poly CCX business media phone. Manualpolyboard4castellano3 provides some troubleshooting tips and solutions for common issues that you may face. Here are some examples:

              - -
                -
              • If you cannot hear any sound from your phone, check the following: -
                  -
                • Make sure that the handset, headset, or speakerphone is not muted.
                • -
                • Make sure that the volume level is not too low or too high.
                • -
                • Make sure that the handset or headset is properly connected to the phone.
                • -
                • Make sure that there is no interference from other devices or sources of noise.
                • -
                -
              • -
              • If you cannot make or receive calls on your phone, check the following: -
                  -
                • Make sure that your phone is registered to a SIP server or service provider.
                • -
                • Make sure that you have entered the correct dialing prefix or access code for outgoing calls.
                • -
                • Make sure that you have not enabled Do Not Disturb mode or call forwarding on your phone.
                • -
                • Make sure that there is no network outage or congestion that may affect your call quality or availability.
                • -
                -
              • -
              • If you cannot update your phone software on your phone, check the following: -
                  -
                • Make sure that you have downloaded the correct software version for your phone model and region.
                • -
                • Make sure that you have enough free space on your phone memory or USB flash drive to store the software file.
                • -
                • Make sure that you have a stable internet connection and power supply during the update process.
                • -
                • Make sure that you have followed the update instructions correctly and do not interrupt the update process.
                • -
                -
              • -
              - -

              If you need more help or support with your Poly CCX business media phone, you can contact Poly support or visit their website at https://www.poly.com/us/en/support. You can also access online resources and forums from Poly and its partners on their websites.

              -

              Conclusion

              - -

              Manualpolyboard4castellano3 is a comprehensive user manual that covers everything you need to know about your Poly CCX business media phone with OpenSIP. It provides detailed instructions on how to set up, operate, and troubleshoot your phone. It also covers advanced features like Broadsoft UC-One and Remote Office. You can download Manualpolyboard4castellano3 for free from the Xiaomi Community website. Manualpolyboard4castellano3 is a must-have document for anyone who owns or uses a Poly CCX business media phone. It will help you get the most out of your phone and enjoy its features and functions. If you have any questions or feedback about Manualpolyboard4castellano3 or your Poly CCX phone, feel free to contact Poly support or visit their website at https://www.poly.com/us/en/support.

              -
              -
              \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Best Android Unlock Apps for Free Download - No Password Required.md b/spaces/fatiXbelha/sd/Best Android Unlock Apps for Free Download - No Password Required.md deleted file mode 100644 index dd83a5ec911a52c67e124df57041ff10dec1d158..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Best Android Unlock Apps for Free Download - No Password Required.md +++ /dev/null @@ -1,185 +0,0 @@ -
              -

              Unlock APK App: What Is It and How to Use It

              -

              Have you ever downloaded an APK file from the internet and wanted to install it on your Android device, but found out that it was locked or incompatible? Or have you ever forgotten your screen lock password or Google account password and needed to bypass the FRP lock on your Android device? If you have faced any of these situations, then you may need an unlock APK app. In this article, we will explain what an unlock APK app is, what are its benefits and risks, and how to use it on different devices.

              -

              unlock apk app


Download File: https://urllie.com/2uNIHG



              -

              Introduction

              -

              An APK file is a package file that contains the installation files for an Android app. You can download APK files from various sources, such as official app stores, third-party websites, or your own device. However, not all APK files are easy to install or use. Some of them may be locked by the developer or the device manufacturer, preventing you from installing or running them. Some of them may also be incompatible with your device model, Android version, or region settings, causing errors or malfunctions.
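For context on how APK files reach a device in the first place: on a developer machine they are usually side-loaded with the standard `adb install` command from the Android platform tools. A minimal Python wrapper might look like the sketch below (it assumes `adb` is installed and on the PATH, a device with USB debugging enabled is connected, and `example.apk` is a placeholder path):

```python
# Hedged sketch: side-load an APK with the standard `adb install` command.
# Assumes the Android platform tools (adb) are on the PATH and a device
# with USB debugging enabled is connected.
import subprocess

def install_apk(path: str) -> None:
    result = subprocess.run(["adb", "install", path],
                            capture_output=True, text=True)
    # adb prints "Success" when the install goes through.
    print(result.stdout or result.stderr)

install_apk("example.apk")  # "example.apk" is a placeholder path
```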

              -

              Unlocking an APK file means removing the restrictions or limitations that prevent you from installing or using it. By unlocking an APK file, you can enjoy some benefits, such as:

              -
                -
              • Accessing more apps that are not available in your region or device
              • -
              • Installing apps that are not supported by your device or Android version
              • -
              • Modifying apps to customize their features or appearance
              • -
              • Bypassing screen lock or FRP lock on your Android device
              • -
              -

              However, unlocking an APK file also comes with some risks, such as:

              -
                -
              • Voiding your device warranty or violating the terms of service
              • -
              • Exposing your device to malware or viruses
              • -
              • Causing system instability or performance issues
              • -
              • Losing your data or settings
              • -
              -

              Therefore, before you decide to unlock an APK file, you should weigh the pros and cons carefully and take some precautions, such as backing up your data, scanning the file for malware, and checking the source and reputation of the file.
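One concrete precaution worth spelling out: compare the downloaded file's checksum against one published by the source, so you know the file was not altered in transit. A minimal Python sketch, with a placeholder file name:

```python
# Minimal sketch: compute the SHA-256 of a downloaded APK so it can be
# compared against a checksum published by the source.
import hashlib

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("example.apk"))  # "example.apk" is a placeholder path
```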

              -

              How to Unlock APK Files on Android Devices

              -

If you want to unlock an APK file on your Android device, there are two main methods that you can use: using a device unlock app or using a file manager app. Let's see how each method works and look at some examples of apps that you can use.

              -


              -

              Method 1: Using a Device Unlock App

              -

A device unlock app is software that can help you access your locked Android device without a password. Some of these apps can remove the screen lock on your phone without erasing your data. Others can unlock different types of locks, like PIN, pattern, password, fingerprint, or face recognition. Some can also bypass the FRP lock, which is a security feature that prevents you from using your device after a factory reset without verifying your Google account.

              -

              To use a device unlock app, you need to download and install it on your Android device or on another device that can connect to your locked device via USB cable. Then, you need to follow the instructions on the app to unlock your device. Depending on the app and the type of lock, you may need to enter some information, such as your device model, IMEI number, or Google account details.

              -

              Some examples of device unlock apps are:

              - - - - - - - - - - - - - - - - - - - - - -
| App Name | Description | Features |
| --- | --- | --- |
| Dr.Fone - Screen Unlock (Android) | A professional tool that can remove any type of screen lock on Android devices without data loss | Supports PIN, pattern, password, fingerprint, and face lock; works with Samsung, LG, Motorola, Huawei, and other brands; easy to use with a simple interface |
| FRP Bypass APK | A free app that can bypass the FRP lock on Android devices by using an OTG cable and a flash drive | Compatible with most Android devices; no need to use a computer; requires an OTG cable and a flash drive |
| iMyFone LockWiper (Android) | A powerful tool that can remove any type of lock on Android devices, including screen lock and FRP lock | Supports over 6000 Android devices; removes screen lock without data loss; removes FRP lock without password; offers a free trial version |
              -

              Method 2: Using a File Manager App

              -

A file manager app is software that can help you access and manage the files and folders on your Android device. Some file managers can also access and modify the APK files that are installed or stored on your device. By using a file manager app, you can unlock an APK file by changing its permissions, renaming it, deleting it, or moving it to another location.

              -

              To use a file manager app, you need to download and install it on your Android device. Then, you need to locate the APK file that you want to unlock and perform the desired action on it. For example, you can change the permissions of the APK file by tapping on it and selecting Properties. Then, you can check or uncheck the boxes for Read, Write, or Execute permissions. You can also rename the APK file by tapping on it and selecting Rename. Then, you can change the extension from .apk to .zip or .rar. You can also delete or move the APK file by tapping on it and selecting Delete or Move.
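A note on the renaming trick above: it works because an APK is simply a ZIP archive, so once the extension is changed (or even without changing it), any ZIP tool can list what is inside. A minimal Python sketch, with a placeholder file name:

```python
# Minimal sketch: an APK is a ZIP archive, so a ZIP reader can list its
# contents directly. "example.apk" is a placeholder path.
import zipfile

with zipfile.ZipFile("example.apk") as apk:
    for name in apk.namelist():
        print(name)  # e.g. AndroidManifest.xml, classes.dex, res/...
```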

              -

              Some examples of file manager apps are:

| App Name | Description | Features |
| --- | --- | --- |
| ES File Explorer | A popular and versatile app that can manage all types of files and folders on Android devices | Supports local and network storage; cloud services like Google Drive and Dropbox; ZIP and RAR files; FTP, SMB, Bluetooth, etc.; offers tools like a cleaner, analyzer, and downloader |
| ASTRO File Manager | A simple and user-friendly app that can organize and manage files and folders on Android devices | Supports local and cloud storage; SD card and USB OTG; ZIP and RAR files; offers tools like backup, search, and a recycle bin |
| X-plore File Manager | A dual-pane app that can explore and manage files and folders on Android devices | Supports local and network storage; cloud services like Google Drive and Dropbox; ZIP, RAR, 7Z, TAR, GZIP files; FTP, SMB, SSH, WebDAV; offers tools like a hex viewer, root explorer, and music player |

              How to Unlock APK Files on PC or Mac

              -

If you want to unlock an APK file on your PC or Mac, there are two main methods that you can use: using an Android emulator or using an APK editor. Let's see how each method works and look at some examples of software that you can use.

              -

              Method 1: Using an Android Emulator

              -

An Android emulator is software that simulates an Android device on your PC or Mac. You can use an Android emulator to run and unlock APK files on your computer, just like you would do on your phone or tablet. You can also install and use device unlock apps or file manager apps on the emulator to unlock the APK files.

              -

              To use an Android emulator, you need to download and install it on your PC or Mac. Then, you need to launch the emulator and set up your Google account and device settings. Then, you need to drag and drop the APK file that you want to unlock onto the emulator screen. The emulator will automatically install and run the APK file. You can then use the emulator controls to interact with the app and unlock it.

              -

              Some examples of Android emulators are:

              -
| App Name | Description | Features |
| --- | --- | --- |
| BlueStacks | A popular and powerful emulator that can run Android apps and games on PC or Mac | Supports multiple instances and synchronization; keyboard, mouse, and gamepad controls; Google Play Store and other app stores; offers tools like a macro recorder and screen recorder |
| NoxPlayer | A fast and stable emulator that can run Android apps and games on PC or Mac | Supports multiple instances and synchronization; keyboard, mouse, and gamepad controls; Google Play Store and other app stores; offers tools like a macro recorder and screen recorder |
| LDPlayer | A lightweight and optimized emulator that can run Android apps and games on PC | Supports multiple instances and synchronization; keyboard, mouse, and gamepad controls; Google Play Store and other app stores; offers tools like a macro recorder and screen recorder |
              - - - - - - - - - - - - - - - - - - - - -

              Method 2: Using an APK Editor

              -

An APK editor is software that can open and edit APK files on your PC or Mac. You can use an APK editor to unlock an APK file by changing its code, resources, or metadata. You can also modify the app's features, appearance, or behavior by editing the APK file.

              -

              To use an APK editor, you need to download and install it on your PC or Mac. Then, you need to open the APK file that you want to unlock with the editor. The editor will show you the contents of the APK file, such as the manifest, classes, resources, etc. You can then edit the APK file as you wish. For example, you can change the app's name, icon, package name, version, permissions, etc. by editing the manifest file. You can also change the app's code by editing the classes.dex file. You can also change the app's graphics, sounds, fonts, etc. by editing the resources files.
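For readers who prefer the command line, the same decode/edit/rebuild cycle is what Apktool performs; a hedged Python driver is sketched below (it assumes `apktool` is installed and on the PATH, `example.apk` is a placeholder path, and note that a rebuilt APK must be re-signed before a device will install it):

```python
# Hedged sketch of the usual APK edit cycle with Apktool:
# decode to a working directory, edit the files by hand, then rebuild.
# Assumes `apktool` is installed and on the PATH.
import subprocess

subprocess.run(["apktool", "d", "example.apk", "-o", "work_dir"], check=True)
# ... edit files under work_dir/ (AndroidManifest.xml, res/, smali/) ...
subprocess.run(["apktool", "b", "work_dir", "-o", "modified.apk"], check=True)
# The rebuilt APK still has to be re-signed (e.g. with apksigner) before
# an Android device will accept it.
```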

              -

              Some examples of APK editors are:

              -
              Software NameDescriptionFeatures
| App Name | Description | Features |
| --- | --- | --- |
| APK Editor Pro | A powerful and professional tool that can edit APK files on PC or Mac | Supports full and simple editing modes; code, resource, and manifest editing; signing and aligning APK files; offers tools like clone app and patch app |
| APK Easy Tool | A simple and user-friendly tool that can edit APK files on PC | Supports decompiling and recompiling APK files; signing and aligning APK files; batch mode for multiple APK files; offers tools like a zipalign tool and sign tool |
| Apktool | A command-line tool that can edit APK files on PC or Mac | Supports decompiling and recompiling APK files; code, resource, and manifest editing; signing and aligning APK files; offers various options for customization and optimization |
              Conclusion -

In this article, we have explained what an unlock APK app is, what its benefits and risks are, and how to use it on different devices. We have also shown you two methods for unlocking APK files on Android devices and two methods for unlocking APK files on PC or Mac. You can choose the method that suits your needs and preferences, but remember to be careful and responsible when unlocking APK files, as it may involve some risks and consequences.

              -

Unlocking APK files can be a useful and fun way to access more apps, customize your device, or bypass some locks. However, it can also be a risky and complicated process that requires some technical skills and knowledge. Therefore, we recommend that you do some research and make a backup before you attempt to unlock any APK file. We also advise you to only download and install APK files from trusted and reputable sources, and scan them for malware before opening them.

              -

              We hope this article has helped you understand what an unlock APK app is and how to use it on different devices. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you!

              -

              FAQs

              -

Here are some frequently asked questions about unlock APK apps:

              -
                -
              1. What is the difference between unlocking and rooting an Android device?
                -Unlocking an Android device means removing the restrictions or limitations that prevent you from installing or using certain apps or features on your device. Rooting an Android device means gaining full access or control over the system files and settings of your device. Unlocking an Android device does not require rooting it, but rooting an Android device may help you unlock some apps or features that are otherwise inaccessible.
              2. -
              3. Is unlocking an APK file legal?
                -Unlocking an APK file is not illegal in itself, but it may violate the terms of service or warranty of the app developer or the device manufacturer. It may also infringe the intellectual property rights or privacy rights of the app developer or the device owner. Therefore, you should always check the terms and conditions of the app and the device before you unlock any APK file, and respect the rights and wishes of the parties involved.
              4. -
              5. How can I tell if an APK file is locked or unlocked?
                -There is no definitive way to tell if an APK file is locked or unlocked, as different apps may have different types of locks or restrictions. However, some common signs that an APK file is locked are: it shows an error message when you try to install or run it; it asks for a password or verification code when you try to open it; it does not work properly or crashes frequently on your device; it has a different extension than .apk (such as .apks, .xapk, .zip, etc.).
              6. -
              7. Can I unlock any APK file?
                -Not necessarily. Some APK files may be too difficult or impossible to unlock, depending on the level of encryption, protection, or compatibility they have. Some APK files may also be corrupted, damaged, or incomplete, making them unusable or unsafe to unlock. Therefore, you should always be careful and selective when choosing which APK files to unlock, and only use reliable and effective tools to do so.
              8. -
              9. What are some alternatives to unlocking an APK file?
                -If you cannot or do not want to unlock an APK file, there may be some alternatives that you can try, such as: finding a similar or compatible app that is already unlocked or available on your device; contacting the app developer or the device manufacturer and requesting them to unlock the app or make it compatible with your device; updating your device software or changing your device settings to match the requirements of the app; using a VPN service or a proxy server to access apps that are restricted in your region.
              10. -

              -
              -
              \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Summertime Saga and Discover the Secrets of Your Fathers Death.md b/spaces/fatiXbelha/sd/Download Summertime Saga and Discover the Secrets of Your Fathers Death.md deleted file mode 100644 index b22fc1ba0e39e4a4c500cfd0a4ad63d60eb4a0b3..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Summertime Saga and Discover the Secrets of Your Fathers Death.md +++ /dev/null @@ -1,109 +0,0 @@ -
              -

              Summertime Saga Download: How to Play the Hottest Dating Sim Game

              -

If you are looking for a fun and spicy game to liven up your life, you might want to check out Summertime Saga. This is a graphical adventure game for adults that has become one of the most popular dating sim games on the internet. In this game, you will play as a young man who is trying to cope with the death of his father, while also dealing with school, work, romance, and mysteries. You will have the chance to interact with over 70 characters, explore over 30 locations, and enjoy over 20 mini-games. Whether you are looking for comedy, drama, romance, or erotica, Summertime Saga has something for everyone.

              -

              Features of Summertime Saga

              -

              Summertime Saga is not your typical dating sim game. It has many features that make it stand out from other games in the genre. Here are some of them:

              -

              summertime saga download


Download: https://urllie.com/2uNCUx



              -
                -
              • Regular updates and new content: The game is still in development and receives frequent updates from the developers. Each update adds new stories, characters, locations, events, and features to the game. You can always expect something new and exciting in Summertime Saga.
              • -
              • Multiple locations and characters to interact with: The game has a rich and diverse world that you can explore at your own pace. You can visit different places such as your home, school, beach, park, mall, hospital, gym, library, and more. You can also meet and interact with over 70 characters, each with their own personality, backstory, appearance, and voice. You can befriend them, date them, or even seduce them.
              • -
              • Marvelous graphics and sound effects: The game has a beautiful art style that resembles anime and manga. The characters are well-designed and expressive, while the backgrounds are detailed and colorful. The game also has high-quality sound effects and music that enhance the atmosphere and mood of the game.
              • -
              • Engaging mini-games and puzzles: The game is not just about talking and flirting. It also has many mini-games and puzzles that you can play to earn money, increase your stats, or progress the story. You can play guitar, solve Sudoku, rap battle, fish, box, shoot hoops, hack computers, and more.
              • -
              • Customizable protagonist and stats: The game allows you to customize your protagonist's name, appearance, clothes, hairstyle, and accessories. You can also improve your stats such as strength, intelligence, charisma, dexterity, luck, fighting skills, guitar skills, etc. Your stats will affect your performance in mini-games, school grades, relationships with characters, and story outcomes.
              • -
              -

              How to Download Summertime Saga for PC, Mac, and Android

              -

              If you are interested in playing Summertime Saga, you can download it for free from the official website or from other platforms. Here are the steps to download and install the game for different devices:

              -

              PC and Mac

              -

              You can download the game for PC and Mac from the official website or from Steam. The game is compatible with Windows 7 or higher, Mac OS X 10.9 or higher, and Linux. The file size is about 1 GB. To install the game, follow these steps:

              -
                -
              1. Download the zip file from the website or Steam.
              2. -
              3. Extract the zip file to a folder of your choice.
              4. -
              5. Open the folder and double-click on the SummertimeSaga.exe file (for Windows) or SummertimeSaga.app file (for Mac).
              6. -
              7. Enjoy the game!
              8. -
              -

              Android

              -

              You can download the game for Android from the official website or from Google Play Store. The game is compatible with Android 5.0 or higher and requires at least 2 GB of RAM and 2 GB of free storage. The file size is about 800 MB. To install the game, follow these steps:

              -
                -
              1. Download the apk file from the website or Google Play Store.
              2. -
              3. Enable unknown sources in your device settings if you downloaded the apk file from the website.
              4. -
              5. Tap on the apk file and install it.
              6. -
              7. Open the game and grant permissions if asked.
              8. -
              9. Enjoy the game!
              10. -
              -

              Tips and Tricks for Playing Summertime Saga

              -

              Summertime Saga is a fun and addictive game, but it can also be challenging and confusing at times. Here are some tips and tricks that can help you get the most out of your gaming experience:

              -
                -
              • Don't sleep until you have to: Sleeping in the game will advance the time and trigger events, but it will also waste your opportunities to do other things. Try to sleep only when you have no energy left, when you have completed all your tasks for the day, or when you want to progress the story.
              • -
              • Listen to the characters and follow the hints: The characters in the game will often give you clues about what to do next, where to go, or what to buy. Pay attention to their dialogues and look for keywords that indicate their interests, preferences, or needs. You can also check your phone for messages, tasks, and stats.
              • -
              • Make money and increase your stats: Money and stats are essential for unlocking new content, buying items, dating characters, and completing quests. You can make money by doing jobs, selling items, playing mini-games, or gambling. You can increase your stats by studying, working out, playing guitar, reading books, or taking courses.
              • -
              • Check the wiki and guides for walkthroughs: If you are stuck or lost in the game, you can always consult the wiki or guides for walkthroughs, tips, cheats, secrets, and more. You can also join the Discord server or Reddit community to chat with other players and get help.
              • -
              -

              Alternatives to Summertime Saga

              -

              If you love Summertime Saga and want to try out other games that are similar or different, here are some alternatives that you might enjoy:

              -


              -

              Doki Doki Literature Club

              -

              This is a psychological horror game disguised as a cute dating sim. You will join a literature club and meet four lovely girls who have different personalities and preferences. You will write poems, share them with your club members, and try to impress your crush. However, things will take a dark and twisted turn as you discover that there is more than meets the eye in this game. You can download it for free from Steam or itch.io.

              -

              HuniePop

              -

              This is a match-three puzzle game with dating sim elements. You will meet eight beautiful women who have different tastes and styles. You will talk to them, date them, and give them gifts to increase their affection towards you. You will also play match-three puzzles to impress them and unlock more intimate scenes. You can buy it from Steam or GOG.com.

              -

              Love Esquire

              -

              This is a RPG dating sim where you play as a squire in a medieval fantasy world. You will train hard, fight enemies, and save the world. You will also romance five lovely ladies who have different personalities and backgrounds. You will have to balance your time, money, and relationships as you make choices that affect the story and the endings. You can buy it from Steam or itch.io.

              -

              Monster Prom

              -

              This is a multiplayer dating sim where you date sexy monsters in high school. You will have three weeks to find a date for the prom and impress them with your charm, wit, and style. You will interact with six main love interests and many other characters, each with their own quirks and preferences. You will also encounter random events, mini-games, secrets, and endings. You can play solo or with up to four players online or locally. You can buy it from Steam or GOG.com.

              -

              Conclusion

              -

              Summertime Saga is a game that will keep you entertained for hours with its captivating story, hilarious dialogue, stunning graphics, and varied gameplay. It is a game that you can play at your own pace and style, as you explore the world, meet the characters, and shape your own adventure. If you are looking for a game that will make you laugh, cry, blush, and thrill, Summertime Saga is the game for you.

              -

              So what are you waiting for? Download Summertime Saga today and start your saga!

              -

              FAQs

              -
                -
              • Is Summertime Saga free?: Yes, Summertime Saga is free to download and play. However, you can support the developers by donating on their Patreon page or buying their merchandise.
              • -
              • Is Summertime Saga safe?: Yes, Summertime Saga is safe to download and play. However, it is not suitable for minors or people who are easily offended by adult content.
              • -
              • How long is Summertime Saga?: Summertime Saga is still in development and does not have a definitive ending yet. However, it already has over 60 hours of gameplay content and over 20 storylines to complete.
              • -
              • How do I update Summertime Saga?: You can update Summertime Saga by downloading the latest version from the official website or Steam. You can also enable automatic updates in your device settings.
              • -
              • How do I save my progress in Summertime Saga?: You can save your progress in Summertime Saga by using the save slots in the game menu. You can also use the cloud save feature to sync your progress across different devices.
              • -

              -
              -
              \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Drift Legends The Ultimate Guide to Mastering the Art of Drifting.md b/spaces/fatiXbelha/sd/Drift Legends The Ultimate Guide to Mastering the Art of Drifting.md deleted file mode 100644 index a6d50c3a474ee2e6b1a52007afb303587f38381c..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Drift Legends The Ultimate Guide to Mastering the Art of Drifting.md +++ /dev/null @@ -1,156 +0,0 @@ - -

              Drift Racing: A Guide for Beginners

              -

              Have you ever watched a movie or a video where a car slides sideways around a corner, leaving behind a trail of smoke and tire marks? If so, you have witnessed drift racing, one of the most exciting and challenging motorsports in the world. Drift racing is a driving technique where the driver intentionally oversteers, loses traction, and maintains control of the car through the entire corner. It requires skill, precision, and courage to master.

              -




              -

In this article, we will explain what drift racing is, how it originated, and what its benefits are. We will also show you how to drift a car, what kind of car you need, and how to improve your drifting skills. Whether you want to drift for fun, for competition, or for self-expression, this guide will help you get started on your drifting journey.

              -

              What is Drift Racing?

              -

              The History of Drift Racing

              -

Drift racing is often associated with Japan, where it became popular in the 1970s. However, the origins of drifting go back further: in the 1930s and 1950s, European drivers like Tazio Nuvolari and Stirling Moss used the four-wheel drift technique to corner faster in Grand Prix racing. Later, in Japan, Kunimitsu Takahashi developed his own style of drifting in touring car races, earning several championships and a large fan base. His drifting skills inspired Keiichi Tsuchiya, who became known as the "Drift King" and honed his craft on mountain roads. In 1987, Tsuchiya starred in a video called Pluspy, which showcased his drifting abilities and influenced many other drivers. In 1988, he helped organize one of the first events dedicated to drifting, called the Ikaten.

              -

              Since then, drifting has grown into a global phenomenon, with competitions held all over the world. Some of the most famous drifting events include D1 Grand Prix in Japan, Formula Drift in the USA, Drift Masters European Championship in Europe, and Red Bull Car Park Drift in the Middle East and Africa. Drifting has also been featured in movies, video games, manga, and anime, such as The Fast and the Furious: Tokyo Drift and Initial D.

              -

              drift legends real car racing
              -drift legends steam
              -drift legends online
              -drift legends apk
              -drift legends mod apk
              -drift legends pc
              -drift legends cheats
              -drift legends codes
              -drift legends hack
              -drift legends download
              -drift legends game
              -drift legends android
              -drift legends ios
              -drift legends review
              -drift legends tips
              -drift legends guide
              -drift legends best car
              -drift legends cars list
              -drift legends gameplay
              -drift legends multiplayer
              -drift legends controller support
              -drift legends y8
              -drift legends unblocked
              -drift legends free play
              -drift legends wiki
              -drift legends reddit
              -drift legends discord
              -drift legends facebook
              -drift legends instagram
              -drift legends twitter
              -drift legends youtube
              -drift legends trailer
              -drift legends soundtrack
              -drift legends update
              -drift legends patch notes
              -drift legends events
              -drift legends career mode
              -drift legends ghost mode
              -drift legends photo mode
              -drift legends video mode
              -drift legends tracks list
              -drift legends tuning parts
              -drift legends daily rewards
              -drift legends achievements
              -drift legends leaderboard
              -drift legends simulator
              -drift legends 3d graphics
              -drift legends realistic physics
              -drift legends engine sounds

              -

              The Basics of Drift Racing

              -

Drifting is not about lap times; it is about style and showmanship. Runs are judged on several criteria: speed, angle, line, and smoke. Speed refers to how fast the car travels through the corner; angle refers to how far sideways the car is; line refers to how closely the car follows the ideal path; and smoke refers to how much tire smoke the car produces. The more speed, angle, line, and smoke the driver can achieve while maintaining control of the car, the higher the score.

              -
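To make the judging idea concrete, here is a minimal sketch of how the four marks could be combined into one run score. The 0-100 scales and the weights are hypothetical, chosen purely for illustration; real competitions each define their own judging rules.

```python
# Hypothetical weights for the four judging criteria described above.
WEIGHTS = {"speed": 0.3, "angle": 0.3, "line": 0.3, "smoke": 0.1}

def drift_score(marks: dict) -> float:
    """Combine per-criterion marks (each 0-100) into one weighted score."""
    return sum(WEIGHTS[name] * mark for name, mark in marks.items())

# Example run: strong angle and line, average smoke production.
print(drift_score({"speed": 80, "angle": 90, "line": 85, "smoke": 70}))  # 83.5
```

Weighting the criteria separately mirrors how a run with a huge angle but a poor line can still lose to a cleaner, faster run.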

              To drift a car, the driver needs to make the rear wheels lose traction while keeping the front wheels pointed in the opposite direction of the turn. This is called oversteer and countersteer. There are different ways to initiate oversteer, such as using the throttle, the handbrake, or the clutch. Once oversteer is initiated, the driver needs to balance the throttle and steering inputs to maintain the drift through the corner.

              -

              The Benefits of Drift Racing

              -

              Drifting has several benefits for drivers who want to have fun and improve their driving skills. Some of these benefits are:

              -
                -
• Cardiovascular exercise: Drifting is a great way to get your heart rate up and burn calories. According to a study by the University of Tokyo, drifting can increase your heart rate by up to 40% and burn up to 400 calories per hour. It also releases adrenaline and endorphins, which can boost your mood and energy levels.

              • -
              • Car control: Drifting teaches you how to handle your car in different situations and conditions. You learn how to control oversteer, understeer, weight transfer, and traction. You also develop your reflexes, coordination, and spatial awareness. These skills can help you become a better and safer driver on the road.
              • -
              • Creativity and expression: Drifting allows you to express your personality and style through your car and driving. You can customize your car with different parts, colors, stickers, and accessories. You can also choose your own drifting technique, line, and angle. Drifting is a form of art that lets you create your own masterpiece on the asphalt.
              • -
              -

              How to Drift a Car?

              -

              Choosing a Car and a Practice Spot

              -

              The first step to drifting is choosing a suitable car and a safe place to practice. Ideally, you want a car that has:

              -
                -
              • Rear-wheel drive: This means that the power from the engine goes to the rear wheels, which makes it easier to break traction and initiate oversteer.
              • -
              • Manual transmission: This gives you more control over the engine speed and torque, which are essential for drifting. You can also use the clutch to kick the rear wheels into a slide.
              • -
              • Limited-slip differential: This is a device that distributes the power evenly between the rear wheels, which prevents one wheel from spinning faster than the other. This helps you maintain stability and balance during a drift.
              • -
              • Lightweight chassis: This reduces the inertia and resistance of the car, which makes it more responsive and agile.
              • -
              -

              Some examples of popular drift cars are the Toyota AE86, Nissan 240SX, Mazda RX-7, BMW E36, and Ford Mustang. However, you can drift any car as long as you have the skill and confidence.

              -

              Once you have a car, you need to find a place where you can practice drifting without endangering yourself or others. The best place to practice is a closed circuit or a track that has wide open spaces, smooth surfaces, and no obstacles or traffic. You can also look for empty parking lots, industrial areas, or rural roads that are legal and safe for drifting. Make sure you have permission from the owner or authority before you start drifting.

              -

              Performing a Power Over to Drift

              -

              One of the easiest ways to drift is by using the power over technique. This involves using the throttle to induce oversteer at the exit of a corner. Here are the steps to perform a power over drift:

              -
                -
              1. Approach the corner at a moderate speed and turn in smoothly.
              2. -
              3. As you reach the apex of the corner, apply more throttle to accelerate out of the corner.
              4. -
              5. The rear wheels will start to spin faster than the front wheels, causing them to lose grip and slide outwards.
              6. -
7. Countersteer by turning the steering wheel in the opposite direction of the slide. For example, if the rear end slides to the left, turn the wheel to the right (see the sketch after this list).
              8. -
              9. Modulate the throttle to control the angle and speed of the drift. If you apply too much throttle, you will spin out; if you apply too little throttle, you will straighten out.
              10. -
              11. Maintain the drift until you exit the corner or reach your desired point.
              12. -
              -
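The countersteer step above follows a simple rule: always steer opposite to the direction the rear end is sliding. Below is a minimal sketch of that rule, assuming plain "left"/"right" labels for illustration; it is not a real steering controller.

```python
# Countersteer rule: turn the wheel opposite to the rear slide.
OPPOSITE = {"left": "right", "right": "left"}

def countersteer(rear_slide: str) -> str:
    """Return the direction to turn the steering wheel."""
    return OPPOSITE[rear_slide]

assert countersteer("left") == "right"   # rear slides left -> steer right
assert countersteer("right") == "left"   # rear slides right -> steer left
```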

              Using the Handbrake to Slide an Automatic

              -

              If you have an automatic car, you can use the handbrake or emergency brake to initiate oversteer at the entry of a corner. The handbrake locks up the rear wheels, which makes them skid and slide sideways. Here are the steps to perform a handbrake drift:

              -
                -
              1. Approach the corner at a moderate speed and turn in slightly.
              2. -
              3. Pull up the handbrake quickly and firmly to lock up the rear wheels and make them slide outwards.
              4. -
              5. Countersteer by turning the steering wheel in the opposite direction of the slide.
              6. -
              7. Release the handbrake and apply some throttle to keep the drift going.
              8. -
              9. Modulate the throttle and steering to control the angle and speed of the drift.
              10. -
              11. Maintain the drift until you exit the corner or reach your desired point.
              12. -
              -

              Using the Clutch Kick to Drift a Manual

              -

              If you have a manual car, you can use the clutch kick technique to initiate oversteer at any point of a corner. The clutch kick involves pressing and releasing the clutch pedal quickly to create a sudden burst of power to the rear wheels, which makes them spin and slide. Here are the steps to perform a clutch kick drift:

              -
                -
              1. Approach the corner at a moderate speed and turn in slightly.
              2. -
              3. Shift to a lower gear and rev up the engine to a high RPM.
              4. -
              5. Press and release the clutch pedal quickly to send a surge of power to the rear wheels.
              6. -
              7. The rear wheels will start to spin faster than the front wheels, causing them to lose grip and slide outwards.
              8. -
              9. Countersteer by turning the steering wheel in the opposite direction of the slide.
              10. -
              11. Modulate the throttle and clutch to control the angle and speed of the drift. If you press the clutch too long, you will lose power; if you release the clutch too soon, you will regain traction.
              12. -
              13. Maintain the drift until you exit the corner or reach your desired point.
              14. -
              -

              How to Improve Your Drifting Skills?

              -

              Upgrading Your Car and Suspension

              -

As you progress in your drifting skills, you may want to upgrade your car and suspension to enhance your performance and style. Some of the common upgrades for drift cars are listed below, with a small configuration sketch after the list:

              -
                -
              • Tires: You want tires that have good grip, durability, and smoke production. You can also use different tires for the front and rear wheels, depending on your preference. For example, some drivers use grippy tires for the front wheels and slippery tires for the rear wheels, which makes it easier to initiate oversteer.
              • -
              • Brakes: You want brakes that have good stopping power, modulation, and cooling. You can also use different brakes for the front and rear wheels, depending on your preference. For example, some drivers use larger brakes for the front wheels and smaller brakes for the rear wheels, which makes it easier to lock up the rear wheels with the handbrake.
              • -
              • Suspension: You want suspension that has good stiffness, damping, and adjustability. You can also use different suspension settings for the front and rear wheels, depending on your preference. For example, some drivers use stiffer suspension for the front wheels and softer suspension for the rear wheels, which makes it easier to transfer weight and balance during a drift.
              • -
              -
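Since every upgrade above can be tuned differently front and rear, one simple way to reason about a setup is as a small front/rear configuration table. The sketch below uses invented example values; the numbers and part choices are assumptions for illustration, not a recommended setup.

```python
# A hypothetical asymmetric drift setup, mirroring the front/rear
# choices described above (grippier front tires, larger front brakes,
# stiffer front springs).
SETUP = {
    "front": {"tires": "grippy", "brake_rotor_mm": 330, "springs": "stiff"},
    "rear": {"tires": "slippery", "brake_rotor_mm": 280, "springs": "soft"},
}

for axle, parts in SETUP.items():
    print(axle, parts)
```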

              Practicing Different Drifting Techniques

              -

              Besides the techniques we have mentioned above, there are many other drifting techniques that you can learn and practice to improve your drifting skills. Some of these techniques are:

              -
                -
              • Feint drift: This involves swerving or flicking the car in one direction before entering a corner in the opposite direction, which creates momentum and oversteer.
              • -
              • Braking drift: This involves applying the brakes before entering a corner, which shifts the weight to the front wheels and makes the rear wheels lose traction.
              • -
• Shift lock drift: This involves downshifting to a lower gear and releasing the clutch abruptly, so that sudden engine braking slows the rear wheels and makes them skid.
              • -
              • Dirt drop drift: This involves dropping one or both rear wheels off the road onto the dirt or grass, which reduces the grip and increases the angle of the drift.
              • -
              • Jump drift: This involves jumping over a bump or a crest, which lifts the car off the ground and makes it slide sideways.
              • -
              -

              You can practice these techniques on different types of corners, such as hairpins, chicanes, S-bends, and sweepers. You can also practice on different types of surfaces, such as asphalt, concrete, gravel, and snow. The more you practice, the more you will learn how to adapt to different situations and conditions.

              -

              Finding Your Drifting Style and Purpose

              -

              Drifting is not only a sport, but also a culture and a lifestyle. Drifting has many subcultures and communities that share their passion and knowledge of drifting. Some of these subcultures are:

              -
                -
              • Touge: This is a Japanese term that refers to drifting on mountain roads, where drivers challenge each other in downhill or uphill races.
              • -
              • Gymkhana: This is a type of motorsport that involves drifting around obstacles, such as cones, barrels, and tires, in a timed course.
              • -
              • Driftkhana: This is a combination of drifting and gymkhana, where drivers perform stunts and tricks while drifting around obstacles.
              • -
              • Freestyle: This is a type of drifting that involves expressing yourself through your car and driving, without following any rules or regulations.
              • -
              -

              You can find your own drifting style and purpose by exploring different aspects of drifting and joining different drifting communities. You can also participate in drifting events, competitions, and shows, where you can showcase your skills and meet other drifters. Drifting is a fun and rewarding hobby that can enrich your life and make you happy.

              -

              Conclusion

              -

Drift racing is an amazing motorsport that involves sliding a car sideways around corners. It has roots in European Grand Prix racing of the 1930s and 1950s, took its modern form in Japan in the 1970s, and has since become a global phenomenon. Drifting has many benefits for drivers who want to have fun and improve their driving skills. It also allows drivers to express their creativity and style through their car and driving. To drift a car, you ideally want a rear-wheel-drive car with a manual transmission and a limited-slip differential. You also need to learn how to initiate oversteer, countersteer, and modulate the throttle. You can improve your drifting skills by upgrading your car and suspension, practicing different drifting techniques, and finding your drifting style and purpose. Drifting is a great way to enjoy driving and have an awesome time.

              -

              FAQs

              -

              Here are some frequently asked questions about drift racing:

              -
                -
              • Q: Is drift racing illegal?
              • -
• A: Drift racing is illegal on public roads, as it can endanger you and others. However, drift racing is legal on private tracks or circuits that are authorized for drifting. You can also join drifting clubs or organizations that host legal drifting events.
              • -
              • Q: Is drift racing expensive?
              • -
              • A: Drift racing can be expensive depending on how much you invest in your car and equipment. Drifting can also wear out your tires, brakes, clutch, engine, and suspension faster than normal driving. However, you can start drifting with a low-budget car and upgrade it gradually as you progress. You can also save money by buying used parts or doing your own maintenance.
              • -
              • Q: Is drift racing dangerous?
              • -
• A: Drift racing can be dangerous if you are not careful or experienced enough. Drifting can cause accidents, injuries, or damage if you lose control of your car or hit something or someone. Therefore, you should always use proper safety equipment, such as a helmet, gloves, a seat belt, and a fire extinguisher. You should also follow the rules and regulations of the track or event you are participating in. You should never drift under the influence of alcohol or drugs.
              • -
              • Q: Is drift racing fun?
              • -
              • A: Drift racing is fun for many reasons. It is fun to slide a car sideways around corners and feel the adrenaline rush. It is fun to challenge yourself and improve your driving skills. It is fun to customize your car and show your personality and style. It is fun to meet other drifters and share your passion and knowledge of drifting. Drifting is fun for anyone who loves driving and having a good time.
              • -
              • Q: How can I learn more about drift racing?
              • -
              • A: You can learn more about drift racing by reading books, magazines, blogs, or websites about drifting. You can also watch videos, movies, or shows about drifting. You can also join online forums, groups, or communities where you can ask questions, get tips, or share your experiences with other drifters. You can also attend drifting events, competitions, or shows where you can watch, learn, or participate in drifting.
              • -
              -

              I hope you enjoyed this article and learned something new about drift racing. If you have any comments, feedback, or suggestions, please let me know. I would love to hear from you. Thank you for reading and happy drifting!

              -
              -
\ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/examples/wenzhong_qa/finetune_wenzhong.sh b/spaces/fclong/summary/fengshen/examples/wenzhong_qa/finetune_wenzhong.sh deleted file mode 100644 index 0100377bf5c54c0eba3088e3b09368a5b31f9c06..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/wenzhong_qa/finetune_wenzhong.sh +++ /dev/null @@ -1,126 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=finetune_wenzhong
-#SBATCH --cpus-per-task=50
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=1
-#SBATCH --gres=gpu:1 # number of gpus
-#SBATCH -o %x-%j.log
-#SBATCH -e %x-%j.err
-
-set -x -e
-
-export MASTER_PORT=$[RANDOM%10000+50000]
-export TORCH_EXTENSIONS_DIR=/cognitive_comp/gaoxinyu/torch_extendsions
-
-echo "START TIME: $(date)"
-MICRO_BATCH_SIZE=1
-ROOT_DIR=/cognitive_comp/gaoxinyu/FS/fengshen/fengshen
-
-ZERO_STAGE=3
-
-config_json="$ROOT_DIR/ds_config.$SLURM_JOBID.json"
-#config_json="$ROOT_DIR/ds_config.wzw.json"
-# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size()
-cat <<EOT > $config_json
-{
-    "train_micro_batch_size_per_gpu":1,
-    "steps_per_print":100,
-    "gradient_clipping":1,
-    "zero_optimization":{
-        "stage": $ZERO_STAGE,
-        "offload_optimizer":{
-            "device":"cpu",
-            "pin_memory":true
-        },
-        "offload_param":{
-            "device":"cpu",
-            "pin_memory":true
-        },
-        "overlap_comm":true,
-        "contiguous_gradients":true,
-        "sub_group_size":1000000000,
-        "stage3_max_live_parameters":1000000000,
-        "stage3_max_reuse_distance":1000000000,
-        "stage3_gather_fp16_weights_on_model_save":true
-    },
-    "optimizer":{
-        "type":"Adam",
-        "params":{
-            "lr": 1e-5,
-            "weight_decay":0.01
-        }
-    },
-    "scheduler":{
-        "type":"WarmupLR",
-        "params":{
-            "warmup_min_lr":5e-6,
-            "warmup_max_lr":1e-5
-        }
-    },
-    "zero_allow_untested_optimizer":false,
-    "fp16":{
-        "enabled":true,
-        "loss_scale":0,
-        "loss_scale_window":1000,
-        "hysteresis":2,
-        "min_loss_scale":1
-    },
-    "activation_checkpointing":{
-        "partition_activations":false,
-        "contiguous_memory_optimization":false
-    },
-    "wall_clock_breakdown":false
-}
-EOT
-
-export PL_DEEPSPEED_CONFIG_PATH=$config_json
-
-TRAINER_ARGS="
-    --max_epochs 2 \
-    --gpus 1 \
-    --num_nodes 1 \
-    --strategy deepspeed_stage_3 \
-    --precision 16 \
-    --default_root_dir $ROOT_DIR \
-    --dirpath $ROOT_DIR/ckpt \
-    --save_top_k 3 \
-    --monitor train_loss \
-    --mode min \
-    --save_last \
-"
-DATA_DIR=/cognitive_comp/gaoxinyu/data/yuyuan
-DATA_ARGS="
-    --data_dir $DATA_DIR \
-    --train_batchsize $MICRO_BATCH_SIZE \
-    --valid_batchsize $MICRO_BATCH_SIZE \
-    --train_data train.txt \
-    --valid_data valid.txt \
-    --test_data test.txt
-"
-
-MODEL_ARGS="
-    --pretrained_model_path /cognitive_comp/gaoxinyu/hf_model/wenzhong \
-    --output_save_path $ROOT_DIR/predict.json \
-    --learning_rate 1e-4 \
-    --weight_decay 0.1 \
-    --warmup 0.01 \
-"
-
-SCRIPTS_PATH=/cognitive_comp/gaoxinyu/FS/fengshen/finetune_wenzhong.py
-
-export CMD=" \
-    $SCRIPTS_PATH \
-    $TRAINER_ARGS \
-    $MODEL_ARGS \
-    $DATA_ARGS \
-    "
-
-echo $CMD
-
-SINGULARITY_PATH=/cognitive_comp/gaoxinyu/docker/pytorch21_06_py3_docker_image_v2.sif
-
-# to debug - add echo (it exits and prints what it would have launched)
-#run_cmd="$PY_LAUNCHER $CMD"
-
-clear; srun --jobid $SLURM_JOBID singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH bash -c 'python $CMD'
-# bash -c 'python $CMD'
\ No newline at end of file diff --git a/spaces/felicco/andite-pastel-mix/app.py deleted file mode 100644 index
f90587fb1b4538cf5a92df4655fed8e4e2cf9579..0000000000000000000000000000000000000000 --- a/spaces/felicco/andite-pastel-mix/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/andite/pastel-mix").launch() \ No newline at end of file diff --git a/spaces/firsk/ai_otto/text/chinese.py b/spaces/firsk/ai_otto/text/chinese.py deleted file mode 100644 index 51acb3ec401d7647278a25537576a0fb1775d827..0000000000000000000000000000000000000000 --- a/spaces/firsk/ai_otto/text/chinese.py +++ /dev/null @@ -1,198 +0,0 @@ -import os -import re - -import cn2an -from pypinyin import lazy_pinyin, Style - -from text.symbols import punctuation -from text.tone_sandhi import ToneSandhi - -current_file_path = os.path.dirname(__file__) -pinyin_to_symbol_map = { - line.split("\t")[0]: line.strip().split("\t")[1] - for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines() -} - -import jieba.posseg as psg - - -rep_map = { - ":": ",", - ";": ",", - ",": ",", - "。": ".", - "!": "!", - "?": "?", - "\n": ".", - "·": ",", - "、": ",", - "...": "…", - "$": ".", - "“": "'", - "”": "'", - "‘": "'", - "’": "'", - "(": "'", - ")": "'", - "(": "'", - ")": "'", - "《": "'", - "》": "'", - "【": "'", - "】": "'", - "[": "'", - "]": "'", - "—": "-", - "~": "-", - "~": "-", - "「": "'", - "」": "'", -} - -tone_modifier = ToneSandhi() - - -def replace_punctuation(text): - text = text.replace("嗯", "恩").replace("呣", "母") - pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys())) - - replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) - - replaced_text = re.sub( - r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", replaced_text - ) - - return replaced_text - - -def g2p(text): - pattern = r"(?<=[{0}])\s*".format("".join(punctuation)) - sentences = [i for i in re.split(pattern, text) if i.strip() != ""] - phones, tones, word2ph = _g2p(sentences) - assert sum(word2ph) == len(phones) - assert len(word2ph) == len(text) # Sometimes it will crash,you can add a try-catch. 
- phones = ["_"] + phones + ["_"] - tones = [0] + tones + [0] - word2ph = [1] + word2ph + [1] - return phones, tones, word2ph - - -def _get_initials_finals(word): - initials = [] - finals = [] - orig_initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS) - orig_finals = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3 - ) - for c, v in zip(orig_initials, orig_finals): - initials.append(c) - finals.append(v) - return initials, finals - - -def _g2p(segments): - phones_list = [] - tones_list = [] - word2ph = [] - for seg in segments: - # Replace all English words in the sentence - seg = re.sub("[a-zA-Z]+", "", seg) - seg_cut = psg.lcut(seg) - initials = [] - finals = [] - seg_cut = tone_modifier.pre_merge_for_modify(seg_cut) - for word, pos in seg_cut: - if pos == "eng": - continue - sub_initials, sub_finals = _get_initials_finals(word) - sub_finals = tone_modifier.modified_tone(word, pos, sub_finals) - initials.append(sub_initials) - finals.append(sub_finals) - - # assert len(sub_initials) == len(sub_finals) == len(word) - initials = sum(initials, []) - finals = sum(finals, []) - # - for c, v in zip(initials, finals): - raw_pinyin = c + v - # NOTE: post process for pypinyin outputs - # we discriminate i, ii and iii - if c == v: - assert c in punctuation - phone = [c] - tone = "0" - word2ph.append(1) - else: - v_without_tone = v[:-1] - tone = v[-1] - - pinyin = c + v_without_tone - assert tone in "12345" - - if c: - # 多音节 - v_rep_map = { - "uei": "ui", - "iou": "iu", - "uen": "un", - } - if v_without_tone in v_rep_map.keys(): - pinyin = c + v_rep_map[v_without_tone] - else: - # 单音节 - pinyin_rep_map = { - "ing": "ying", - "i": "yi", - "in": "yin", - "u": "wu", - } - if pinyin in pinyin_rep_map.keys(): - pinyin = pinyin_rep_map[pinyin] - else: - single_rep_map = { - "v": "yu", - "e": "e", - "i": "y", - "u": "w", - } - if pinyin[0] in single_rep_map.keys(): - pinyin = single_rep_map[pinyin[0]] + pinyin[1:] - - assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin) - phone = pinyin_to_symbol_map[pinyin].split(" ") - word2ph.append(len(phone)) - - phones_list += phone - tones_list += [int(tone)] * len(phone) - return phones_list, tones_list, word2ph - - -def text_normalize(text): - numbers = re.findall(r"\d+(?:\.?\d+)?", text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - text = replace_punctuation(text) - return text - - -def get_bert_feature(text, word2ph): - from text import chinese_bert - - return chinese_bert.get_bert_feature(text, word2ph) - - -if __name__ == "__main__": - from text.chinese_bert import get_bert_feature - - text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏" - text = text_normalize(text) - print(text) - phones, tones, word2ph = g2p(text) - bert = get_bert_feature(text, word2ph) - - print(phones, tones, word2ph, bert.shape) - - -# # 示例用法 -# text = "这是一个示例文本:,你好!这是一个测试...." 
-# print(g2p_paddle(text)) # 输出: 这是一个示例文本你好这是一个测试 diff --git a/spaces/fiyen/YangyangChatGPT/modules/chat_func.py b/spaces/fiyen/YangyangChatGPT/modules/chat_func.py deleted file mode 100644 index a4875fb03a27c5e29ef5e439ab7e3da3fc827155..0000000000000000000000000000000000000000 --- a/spaces/fiyen/YangyangChatGPT/modules/chat_func.py +++ /dev/null @@ -1,513 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import os -import requests -import urllib3 - -from tqdm import tqdm -import colorama -from duckduckgo_search import ddg -import asyncio -import aiohttp -from llama_index.indices.query.vector_store import GPTVectorStoreIndexQuery -from llama_index.indices.query.schema import QueryBundle -from langchain.llms import OpenAIChat - -from modules.presets import * -from modules.llama_func import * -from modules.utils import * -import modules.shared as shared - -# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s") - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -initial_prompt = "You are a helpful assistant." -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -def get_response( - openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model -): - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - history = [construct_system(system_prompt), *history] - - payload = { - "model": selected_model, - "messages": history, # [{"role": "user", "content": f"{inputs}"}], - "temperature": temperature, # 1.0, - "top_p": top_p, # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - if stream: - timeout = timeout_streaming - else: - timeout = timeout_all - - proxies = get_proxies() - - # 如果有自定义的api-url,使用自定义url发送请求,否则使用默认设置发送请求 - if shared.state.api_url != API_URL: - logging.info(f"使用自定义API URL: {shared.state.api_url}") - - response = requests.post( - shared.state.api_url, - headers=headers, - json=payload, - stream=True, - timeout=timeout, - proxies=proxies, - ) - - return response - - -def stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - def get_return_value(): - return chatbot, history, status_text, all_token_counts - - logging.info("实时回答模式") - partial_words = "" - counter = 0 - status_text = "开始实时传输回答……" - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - user_token_count = 0 - if fake_input is not None: - input_token_count = count_token(construct_user(fake_input)) - else: - input_token_count = count_token(construct_user(inputs)) - if len(all_token_counts) == 0: - system_prompt_token_count = count_token(construct_system(system_prompt)) - user_token_count = ( - input_token_count + system_prompt_token_count - ) - else: - user_token_count = input_token_count - all_token_counts.append(user_token_count) - logging.info(f"输入token计数: {user_token_count}") - yield get_return_value() - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - True, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - 
standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - yield get_return_value() - return - except requests.exceptions.ReadTimeout: - status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt - yield get_return_value() - return - - yield get_return_value() - error_json_str = "" - - if fake_input is not None: - history[-2] = construct_user(fake_input) - for chunk in response.iter_lines(): - if counter == 0: - counter += 1 - continue - counter += 1 - # check whether each line is non-empty - if chunk: - chunk = chunk.decode() - chunklength = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - logging.info(chunk) - error_json_str += chunk - status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}" - yield get_return_value() - continue - # decode each line as response data is in bytes - if chunklength > 6 and "delta" in chunk["choices"][0]: - finish_reason = chunk["choices"][0]["finish_reason"] - status_text = construct_token_message( - sum(all_token_counts), stream=True - ) - if finish_reason == "stop": - yield get_return_value() - break - try: - partial_words = ( - partial_words + chunk["choices"][0]["delta"]["content"] - ) - except KeyError: - status_text = ( - standard_error_msg - + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: " - + str(sum(all_token_counts)) - ) - yield get_return_value() - break - history[-1] = construct_assistant(partial_words) - chatbot[-1] = (chatbot[-1][0], partial_words+display_append) - all_token_counts[-1] += 1 - yield get_return_value() - - -def predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=None, - display_append="" -): - logging.info("一次性回答模式") - history.append(construct_user(inputs)) - history.append(construct_assistant("")) - if fake_input: - chatbot.append((fake_input, "")) - else: - chatbot.append((inputs, "")) - if fake_input is not None: - all_token_counts.append(count_token(construct_user(fake_input))) - else: - all_token_counts.append(count_token(construct_user(inputs))) - try: - response = get_response( - openai_api_key, - system_prompt, - history, - temperature, - top_p, - False, - selected_model, - ) - except requests.exceptions.ConnectTimeout: - status_text = ( - standard_error_msg + connection_timeout_prompt + error_retrieve_prompt - ) - return chatbot, history, status_text, all_token_counts - except requests.exceptions.ProxyError: - status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - except requests.exceptions.SSLError: - status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt - return chatbot, history, status_text, all_token_counts - response = json.loads(response.text) - if fake_input is not None: - history[-2] = construct_user(fake_input) - try: - content = response["choices"][0]["message"]["content"] - history[-1] = construct_assistant(content) - chatbot[-1] = (chatbot[-1][0], content+display_append) - total_token_count = response["usage"]["total_tokens"] - if fake_input is not None: - all_token_counts[-1] += count_token(construct_assistant(content)) - else: - all_token_counts[-1] = total_token_count - sum(all_token_counts) - status_text = construct_token_message(total_token_count) - return chatbot, history, status_text, all_token_counts - except KeyError: - status_text = standard_error_msg + str(response) - return chatbot, history, status_text, all_token_counts - 
-def is_repeated_string(s): - n = len(s) - for i in range(1, n // 2 + 1): - if n % i == 0: - sub = s[:i] - if sub * (n // i) == s: - return True - return False - -def predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], - use_websearch=False, - files = None, - reply_language="中文", - should_check_token_count=True, -): # repetition_penalty, top_k - logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL) - if is_repeated_string(inputs): - print("================== 有人来浪费了 ======================") - yield chatbot+[(inputs, "🖕️🖕️🖕️🖕️🖕️看不起你")], history, "🖕️🖕️🖕️🖕️🖕️🖕️", all_token_counts - return - if should_check_token_count: - yield chatbot+[(inputs, "")], history, "开始生成回答……", all_token_counts - if reply_language == "跟随问题语言(不稳定)": - reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch." - old_inputs = None - display_reference = [] - limited_context = False - if files: - limited_context = True - old_inputs = inputs - msg = "加载索引中……(这可能需要几分钟)" - logging.info(msg) - yield chatbot+[(inputs, "")], history, msg, all_token_counts - index = construct_index(openai_api_key, file_src=files) - msg = "索引构建完成,获取回答中……" - logging.info(msg) - yield chatbot+[(inputs, "")], history, msg, all_token_counts - llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name=selected_model)) - prompt_helper = PromptHelper(max_input_size = 4096, num_output = 5, max_chunk_overlap = 20, chunk_size_limit=600) - service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper) - query_object = GPTVectorStoreIndexQuery(index.index_struct, service_context=service_context, similarity_top_k=5, vector_store=index._vector_store, docstore=index._docstore) - query_bundle = QueryBundle(inputs) - nodes = query_object.retrieve(query_bundle) - reference_results = [n.node.text for n in nodes] - reference_results = add_source_numbers(reference_results, use_source=False) - display_reference = add_details(reference_results) - display_reference = "\n\n" + "".join(display_reference) - inputs = ( - replace_today(PROMPT_TEMPLATE) - .replace("{query_str}", inputs) - .replace("{context_str}", "\n\n".join(reference_results)) - .replace("{reply_language}", reply_language ) - ) - elif use_websearch: - limited_context = True - search_results = ddg(inputs, max_results=5) - old_inputs = inputs - reference_results = [] - for idx, result in enumerate(search_results): - logging.info(f"搜索结果{idx + 1}:{result}") - domain_name = urllib3.util.parse_url(result["href"]).host - reference_results.append([result["body"], result["href"]]) - display_reference.append(f"{idx+1}. 
[{domain_name}]({result['href']})\n") - reference_results = add_source_numbers(reference_results) - display_reference = "\n\n" + "".join(display_reference) - inputs = ( - replace_today(WEBSEARCH_PTOMPT_TEMPLATE) - .replace("{query}", inputs) - .replace("{web_results}", "\n\n".join(reference_results)) - .replace("{reply_language}", reply_language ) - ) - else: - display_reference = "" - - if len(openai_api_key) != 51: - status_text = standard_error_msg + no_apikey_msg - logging.info(status_text) - chatbot.append((inputs, "")) - if len(history) == 0: - history.append(construct_user(inputs)) - history.append("") - all_token_counts.append(0) - else: - history[-2] = construct_user(inputs) - yield chatbot+[(inputs, "")], history, status_text, all_token_counts - return - elif len(inputs.strip()) == 0: - status_text = standard_error_msg + no_input_msg - logging.info(status_text) - yield chatbot+[(inputs, "")], history, status_text, all_token_counts - return - - if stream: - logging.info("使用流式传输") - iter = stream_predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=display_reference - ) - for chatbot, history, status_text, all_token_counts in iter: - if shared.state.interrupted: - shared.state.recover() - return - yield chatbot, history, status_text, all_token_counts - else: - logging.info("不使用流式传输") - chatbot, history, status_text, all_token_counts = predict_all( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - all_token_counts, - top_p, - temperature, - selected_model, - fake_input=old_inputs, - display_append=display_reference - ) - yield chatbot, history, status_text, all_token_counts - - logging.info(f"传输完毕。当前token计数为{all_token_counts}") - if len(history) > 1 and history[-1]["content"] != inputs: - logging.info( - "回答为:" - + colorama.Fore.BLUE - + f"{history[-1]['content']}" - + colorama.Style.RESET_ALL - ) - - if limited_context: - history = history[-4:] - all_token_counts = all_token_counts[-2:] - yield chatbot, history, status_text, all_token_counts - - if stream: - max_token = MODEL_SOFT_TOKEN_LIMIT[selected_model]["streaming"] - else: - max_token = MODEL_SOFT_TOKEN_LIMIT[selected_model]["all"] - - if sum(all_token_counts) > max_token and should_check_token_count: - status_text = f"精简token中{all_token_counts}/{max_token}" - logging.info(status_text) - yield chatbot, history, status_text, all_token_counts - iter = reduce_token_size( - openai_api_key, - system_prompt, - history, - chatbot, - all_token_counts, - top_p, - temperature, - max_token//2, - selected_model=selected_model, - ) - for chatbot, history, status_text, all_token_counts in iter: - status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}" - yield chatbot, history, status_text, all_token_counts - - -def retry( - openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - stream=False, - selected_model=MODELS[0], - reply_language="中文", -): - logging.info("重试中……") - if len(history) == 0: - yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count - return - history.pop() - inputs = history.pop()["content"] - token_count.pop() - iter = predict( - openai_api_key, - system_prompt, - history, - inputs, - chatbot, - token_count, - top_p, - temperature, - stream=stream, - selected_model=selected_model, - reply_language=reply_language, - ) - logging.info("重试中……") - for x in iter: - yield x - logging.info("重试完毕") - - -def reduce_token_size( - 
openai_api_key, - system_prompt, - history, - chatbot, - token_count, - top_p, - temperature, - max_token_count, - selected_model=MODELS[0], - reply_language="中文", -): - logging.info("开始减少token数量……") - iter = predict( - openai_api_key, - system_prompt, - history, - summarize_prompt, - chatbot, - token_count, - top_p, - temperature, - selected_model=selected_model, - should_check_token_count=False, - reply_language=reply_language, - ) - logging.info(f"chatbot: {chatbot}") - flag = False - for chatbot, history, status_text, previous_token_count in iter: - num_chat = find_n(previous_token_count, max_token_count) - logging.info(f"previous_token_count: {previous_token_count}, keeping {num_chat} chats") - if flag: - chatbot = chatbot[:-1] - flag = True - history = history[-2*num_chat:] if num_chat > 0 else [] - token_count = previous_token_count[-num_chat:] if num_chat > 0 else [] - msg = f"保留了最近{num_chat}轮对话" - yield chatbot, history, msg + "," + construct_token_message( - sum(token_count) if len(token_count) > 0 else 0, - ), token_count - logging.info(msg) - logging.info("减少token数量完毕") diff --git a/spaces/flowers-team/SocialAISchool/textworld_utils/__init__.py b/spaces/flowers-team/SocialAISchool/textworld_utils/__init__.py deleted file mode 100644 index 90f60fdd89ad8575faafe45188bd1d968852fc67..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/textworld_utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .utils import * \ No newline at end of file diff --git a/spaces/fmind/resume/files/linkedin.md b/spaces/fmind/resume/files/linkedin.md deleted file mode 100644 index 78e4c96380544d5581a640d53f0a3946a6684f4a..0000000000000000000000000000000000000000 --- a/spaces/fmind/resume/files/linkedin.md +++ /dev/null @@ -1,1168 +0,0 @@ -# Profile - -## Overview - -- First name: Médéric -- Last name: HURIER -- Pseudo: Fmind -- Followers: 4K -- Location: Luxembourg, Luxembourg -- Education: University of Luxembourg -- Current position: Decathlon Technology -- Public URL: www.linkedin.com/in/fmind-dev -- Industry: Technology, Information and Internet -- Address: 28 Avenue François Clément, 5612 Mondorf-les-Bains, Luxembourg -- Headline: Freelancer | AI/ML/MLOps Engineer | Data Scientist | MLOps Community Organizer | OpenClassrooms Mentor | Hacker | PhD - -## Websites - -- Company website: https://www.fmind.dev -- Twitter: https://twitter.com/fmind_dev -- Portfolio: https://github.com/fmind/ -- Blog: https://fmind.medium.com/ - -## About - -Note: I'm not available to work on new missions until the 1st of -September 2024. Thank you for your understanding. - -When I worked as a teacher, I told my students that Artificial -Intelligence and Machine Learning are the most effective levers to make -a difference. Every day, new AI and ML solutions are released to empower -companies and individuals alike. The question is: Is your business ready -to provide the best AI/ML products for your customers? - -I'm a professional Machine Learning Engineer, Data Scientist, and MLOps -ready to assist you in this quest. I've completed a Ph.D. in Machine -Learning and several high-end AI/ML certifications to help you build -leading data-driven services. My past experiences include working with -companies like Google, BNP Paribas, ArcelorMittal, the European -Commission, and Decathlon to frame their needs, create state-of-the-art -models and deliver AI/ML artifacts at scale. - -I now work as a freelancer in Luxembourg, and I can carry out missions -remotely in other European countries. 
You can get in touch with me on -LinkedIn or at contact@fmind.dev. I'll be happy to collaborate with you -or discuss your favored AI/ML topics in the MLOps Community. - -# Experience - -## Lead MLOps Engineer - -- Company: Decathlon Technology -- Period: Sep 2022 - Present -- Location: Luxembourg (Hybrid) -- Mission: Continue the design and implementation of Decathlon's MLOps platform with Databricks and AWS. -- Skills: Artificial Intelligence (AI) · Machine Learning · MLOps · Python · ChatGPT · Agile Methodology · MLflow · Docker · Jira · Git · DataBricks · Apache Airflow · AWS SageMaker · Terraform · Apache Spark · Kubernetes - -## Mentor for aspiring Data Scientist and AI/ML Engineer - -- Company: OpenClassrooms -- Period: Mar 2023 - Present -- Location: France (Remote) -- Mission: Tutoring adult students to become data scientists specializing in machine learning. -- Courses: - - https://openclassrooms.com/fr/paths/793-data-scientist - - https://openclassrooms.com/fr/paths/794-machine-learning-engineer - - https://openclassrooms.com/fr/paths/795-ai-engineer -- Skills: Artificial Intelligence (AI) · Machine Learning · Python · Deep Learning · Data Science · Statistics · ChatGPT · Big Data · Jupyter · Pandas · Git · Natural Language Processing (NLP) · Scikit-Learn - -## Senior Data Scientist & Project Manager - -- Company: Cronos Europa -- Customer: European Commission -- Period: Dec 2021 - Sep 2022 -- Location: Luxembourg (Hybrid) -- Mission: Enhance the ARACHNE risk scoring tool (fraud detection). -- Main tasks and responsibilities: - - Develop a new version of Arachne using data mining techniques - - Manage the development of the Arachne PoC/Project (SCRUM) - - Assist data scientists in their projects (Virtual Assistant, NLP, - …) -- Skills: Artificial Intelligence (AI) · Machine Learning · MLOps · Python · Deep Learning · Data Science · Big Data · Agile Methodology · Project Management · Functional Programming · Jupyter · Pandas · Docker · Jira · Git · PostgreSQL · AWS SageMaker · Flask · UML · API REST · Terraform · Transformers · Natural Language Processing (NLP) · Data Engineering · Microsoft Azure Machine Learning · Neo4j - -## Project Manager & Machine Learning Engineer - -- Company: SFEIR Luxembourg -- Period: Dec 2020 - Dec 2021 -- Location: Luxembourg (Remote) -- Mission: Design and implement the next ML/MLOps platform on AWS and GCP. 
-- Main tasks and responsibilities: - - Design the functional & technical architecture of the platform - - Manage the MLOps@Decathlon initiative (tasks, plannings) - - Select the vendor solutions based on a user need analysis - - Communicate the progress and success to stack-holders - - Assist data scientists in their project (audience, forecast) -- Technical stack: - - Data Science: Python, TensorFlow, Spark, sklearn, Jupyter, Airflow - - Management: Google Workspace, Jira, UML, Terraform, Jenkins - - Environments: AWS (SageMaker), GCP (Vertex AI), DataBricks -- Skills: Artificial Intelligence (AI) · Machine Learning · MLOps · Python · Deep Learning · Data Science · Big Data · Agile Methodology · Project Management · Functional Programming · Google Cloud Platform (GCP) · Tensorflow · MLflow · Jupyter · Pandas · Docker · Keras · Jira · Git · DataBricks · Apache Airflow · AWS SageMaker · Flask · UML · Terraform · Data Engineering · Vertex AI (GCP) · Apache Spark · Scikit-Learn · Kubernetes - -## Data Scientist - -- Company: SFEIR -- Period: Oct 2020 - Nov 2020 -- Location: Luxembourg (Remote) -- Mission: Improve the visibility and assets of SFEIR's Data Team. -- Main tasks and responsibilities: - - Design and create technical interviews for recruiting data scientists. - - Become a Professional Machine Learning Engineer on Google Cloud. - - Propose a strategy to improve the online visibility of SFEIR data team. - - Share knowledge about data trends with non-technical staff members. - - Create a group to write tutorials and kata on AI/ML for SFEIR developers. -- Skills: Artificial Intelligence (AI) · Machine Learning · MLOps · Python · Deep Learning · Data Science · Agile Methodology · Functional Programming · Google Cloud Platform (GCP) · Tensorflow · Jupyter · Pandas · Keras · Git · MongoDB · Vertex AI (GCP) · Apache Spark · Scikit-Learn - -## Data Scientist - -- Company: SFEIR -- Customer: ArcelorMittal -- Period: Jan 2020 - Sep 2020 -- Location: Luxembourg (Remote) -- Mission: Train and optimize machine learning models to recommend steel prices. -- Main tasks and responsibilities: - - Create and fine-tune machine-learning models (tree-based) - - Evaluate the performance of the model on real datasets - - Communicate the results to business stack-holders -- Technical stack: - - Data Science: Python, XGBoost, sklearn, Jupyter, SQL - - Analytics: Matplotlib, Seaborn, Tableau, Plotly, Dash - - Environment: MS-SQL, Azure Cloud, Jira, Papermill -- Skills: Artificial Intelligence (AI) · Machine Learning · MLOps · Python · Data Science · Agile Methodology · Functional Programming · Jupyter · Pandas · Jira · Git · Natural Language Processing (NLP) · Scikit-Learn - -## Research And Development Specialist - -- Company: University of Luxembourg -- Period: Sep 2019 - Jan 2020 -- Location: Luxembourg -- Mission: Management and development of Natural Language Understanding (NLU) projects for BGL BNP Paribas. -- Skills: Artificial Intelligence (AI) · Machine Learning · Python · Data Science · Big Data · Functional Programming · Tensorflow · Jupyter · Pandas · Docker · Git · PostgreSQL · Ansible · Flask · UML · JSON · API REST · Transformers · Natural Language Processing (NLP) · Apache Spark · Scikit-Learn - -## Doctoral researcher - -- Company: University of Luxembourg -- Period: Sep 2015 - Jan 2020 -- Location: Luxembourg -- Missions: - - Research activities focused on Android security and artificial intelligence. - - Teaching big data, machine learning and Android programming to students. 
- - Collaboration with Google, San Francisco on finding malicious Android artifacts. -- Skills: Artificial Intelligence (AI) · Machine Learning · Python · Deep Learning · Data Science · Statistics · Big Data · Cybersecurity · Functional Programming · Jupyter · Pandas · Docker · Git · NoSQL · MongoDB · PostgreSQL · ElasticSearch · Ansible · Flask · JSON · Android · API REST · Natural Language Processing (NLP) · Data Engineering · Apache Spark · Scikit-Learn - -## Mentor for aspiring Data Scientist - -- Company: OpenClassrooms -- Period: Aug 2018 - Dec 2019 -- Location: France -- Mission: Tutoring adult students to become data scientists specializing in machine learning. -- Skills: Artificial Intelligence (AI) · Machine Learning · Python · Data Science · Jupyter · Pandas · Git · Flask · JSON · API REST · Scikit-Learn - -## Security engineer specialized in log management and analysis - -- Company: Clearstream -- Period: Apr 2014 - Aug 2015 -- Location: Luxembourg -- Mission: Selection and deployment of a SIEM solution, participating in security incident response. -- Skills: Python · Big Data · ISO 27001 · Cybersecurity · Jupyter · Pandas · Git · ElasticSearch · Data Engineering - -## Web developer and administrator - -- Company: Freaxmind -- Period: Aug 2011 - Aug 2013 -- Location: France -- Mission: Various contracts ranging from web development to software maintenance and debugging. -- Skills: Python · Object Oriented Programming (POO) · Git · Ansible · Flask - -## Web Developer - -- Company: Toul'embal -- Period: Jun 2012 - Aug 2012 -- Location: Toul, France -- Mission: Extension of a Prestashop e-commerce website and creation a portfolio website with WordPress. -- Skills: Object Oriented Programming (POO) - -## Web Programmer - -- Company: Empreinte Studio -- Period: Oct 2010 - Aug 2011 -- Location: Épernay, France -- Mission: Creation of modern website in PHP and MySQL with professional writers and graphic designers. -- Skills: Object Oriented Programming (POO) · Git - -## Software Developer - -- Company: GEOVARIANCES -- Period: Sep 2009 - Sep 2010 -- Location: Avon, France -- Mission: Development of a geostatistic application in C++ and Qt with experienced software engineers. -- Skills: Object Oriented Programming (POO) · Git · UML - -## Web Developer - -- Company: CV Champagne Nicolas Feuillatte -- Period: Apr 2009 - Aug 2009 -- Location: Épernay, France -- Mission: Integration of customer and share management modules to J.D. Edwards with PHP and Oracle. -- Skills: Object Oriented Programming (POO) - -# Education - -## Doctor of Philosophy (PhD) in computer security and artificial intelligence - -- School: University of Luxembourg -- Location: Luxembourg -- Grade: Very Good -- Period: 2015 - 2019 -- Activities and Societies: Teach Big Data and Android to students. 
-- Thesis title: Creating better ground truth to further understand Android malware - -## Master's degree in computer and information systems security - -- School: UFR Mathématiques, Informatique, Mécanique et Automatique -- Location: Metz (France) -- Period: 2013 - 2014 - -## Bachelor and master years in computer science applied to business informatics - -- School: UFR Mathématiques et Informatique de l’Université de Lorraine -- Location: Nancy (France) -- Period: 2011 - 2013 - -## Professional bachelor's degree in computer security and databases - -- School: IUT Sénart-Fontainebleau -- Location: Fontainebleau (France) -- Period: 2009 - 2010 - -## Professional bachelor’s degree in web development and integration - -- School: IUT Nancy-Charlemagne -- Location: Nancy (France) -- Period: 2008 - 2009 - -## Technical degree in network and software development - -- School: Lycée François 1er -- Location: Vitry-le-François (France) -- Period: 2006 - 2008 - -## Baccalauréat général degree in science, specialized in biology - -- School: Lycée Marc Chagall -- Location: Reims (France) -- Period: 2003 - 2006 - -# Volunteer Experiences - -## MLOps Community Organizer (Luxembourg) - -- Community: MLOps Community -- Role: Organizer -- Location: Luxembourg -- Period: Nov 2022 - present -- Field: Science and Technology -- Mission: Organize regular meetups and events for the MLOps Community. -- Partners: AWS and the University of Luxembourg. -- Link: https://www.meetup.com/luxembourg-mlops-community/ - -# Licenses & Certifications - -## Machine Learning Associate - -- Issuer: Databricks -- Issued: Nov 2022 -- Credential ID: 61461287 - -## Databricks Lakehouse Fundamentals - -- Issuer: Databricks -- Issued: Oct 2022 -- Credential ID: 61029028 - -## Architecting with Google Kubernetes Engine Specialization - -- Issuer: Google -- Issued: Sep 2022 -- Credential ID: WLU4DBPSQ4B5 - -## Architecting with Google Kubernetes Engine: Foundations - -- Issuer: Google -- Issued: Sep 2022 -- Credential ID: DFWAC6BXLNGL - -## Architecting with Google Kubernetes Engine: Production - -- Issuer: Google -- Issued: Sep 2022 -- Credential ID: K5SZHUST5HP2 - -## Architecting with Google Kubernetes Engine: Workloads - -- Issuer: Google -- Issued: Sep 2022 -- Credential ID: ULJQAXGDVKYK - -## Google Cloud Fundamentals: Core Infrastructure - -- Issuer: Google -- Issued: Sep 2022 -- Credential ID: 4CE8WQ6AWKFF - -## Iterative Tools for Data Scientists and Analysts - -- Issuer: Iterative -- Issued: Aug 2022 -- Credential ID: 62fcb79418f51945ea - -## Azure Data Scientist Associate - -- Issuer: Microsoft -- Issued: Jul 2022 -- Credential ID: 992564946 - -## Azure Machine Learning for Data Scientists - -- Issuer: Microsoft -- Issued: Jun 2022 -- Credential ID: MZKV7LSTQ9HX - -## Build and Operate Machine Learning Solutions with Azure Microsoft - -- Issuer: Microsoft -- Issued: Jun 2022 -- Credential ID: 7FBX68MH272C - -## Create Machine Learning Models in Microsoft Azure - -- Issuer: Microsoft -- Issued: Jun 2022 -- Credential ID: SHALM9PM3MPX - -## Microsoft Azure Data Scientist Associate - DP-100 Test Prep Specialization - -- Issuer: Microsoft -- Issued: Jun 2022 -- Credential ID: L5P3TYLAYLLT - -## Perform data science with Azure Databricks - -- Issuer: Microsoft -- Issued: Jun 2022 -- Credential ID: RQ7PLFYZVLXX - -## Prepare for DP-100: Data Science on Microsoft Azure Exam - -- Issuer: Microsoft -- Issued: Jun 2022 -- Credential ID: K5KW27AVMYS2 - -## Neo4j Graph Data Science Certified - -- Issuer: Neo4j -- Issued: Apr 2022 -- 
Credential ID: 17351346 - -## Microsoft Certified: Azure AI Fundamentals - -- Issuer: Microsoft -- Issued: Jan 2022 -- Credential ID: 1098-0884 - -## Artificial Intelligence on Microsoft Azure - -- Issuer: Microsoft -- Issued: Dec 2021 -- Credential ID: Z8FSWXBSAGLD - -## Computer Vision in Microsoft Azure - -- Issuer: Microsoft -- Issued: Dec 2021 -- Credential ID: KDDPYLKM2DA5 - -## Microsoft Azure AI Fundamentals AI-900 Exam Prep Specialization - -- Issuer: Microsoft -- Issued: Dec 2021 -- Credential ID: 96944QKZH9BU - -## Microsoft Azure Machine Learning - -- Issuer: Microsoft -- Issued: Dec 2021 -- Credential ID: 32ES25845Q55 - -## Natural Language Processing in Microsoft Azure - -- Issuer: Microsoft -- Issued: Dec 2021 -- Credential ID: XVN23N8CKRGY - -## Preparing for AI-900: Microsoft Azure AI Fundamentals exam - -- Issuer: Microsoft -- Issued: Dec 2021 -- Credential ID: YC83C22L8TBL - -## Build a Website on Google Cloud - -- Issuer: Google -- Issued: Aug 2021 - -## Build and Secure Networks in Google Cloud - -- Issuer: Google -- Issued: Aug 2021 - -## Create ML Models with BigQuery ML - -- Issuer: Google -- Issued: Aug 2021 - -## Create and Manage Cloud Resources - -- Issuer: Google -- Issued: Aug 2021 - -## Deploy to Kubernetes in Google Cloud - -- Issuer: Google -- Issued: Aug 2021 - -## Implement DevOps in Google Cloud - -- Issuer: Google -- Issued: Aug 2021 - -## Insights from Data with BigQuery - -- Issuer: Google -- Issued: Aug 2021 - -## Integrate with Machine Learning APIs - -- Issuer: Google -- Issued: Aug 2021 - -## Perform Foundational Infrastructure Tasks in Google Cloud - -- Issuer: Google -- Issued: Aug 2021 - -## Apache Spark Associate Developer - -- Issuer: Databricks -- Issued: Jun 2021 -- Credential ID: fff03919-bbc9-304e-99ad-6f2ed47455ed - -## Scalable Machine Learning with Apache Spark - -- Issuer: Databricks -- Issued: May 2021 -- Credential ID: 0f4adf96-0412-32f2-8232-fa50c51c9b47 - -## Apache Spark Programming with Databricks - -- Issuer: Databricks -- Issued: May 2021 -- Credential ID: 518a1d63-8894-3ab5-aaa5-50a9f169436c - -## Data Science Professional - -- Issuer: Databricks -- Issued: May 2021 -- Credential ID: f05164e1-5a78-37f8-9c69-3e996fdbb21f - -## Delta Lake Fundamentals Accreditation - -- Issuer: Databricks -- Issued: May 2021 -- Credential ID: 0d042e3f-50d3-3821-b064-f3c12ca6c17f - -## Deploying a Machine Learning Project with MLflow Projects - -- Issuer: Databricks -- Issued: May 2021 -- Credential ID: 2afa0c7f-48f4-35af-b366-f7c77d2cd20a - -## Tracking Experiments with MLflow - -- Issuer: Databricks -- Issued: May 2021 -- Credential ID: 0cbf87b7-e096-3792-a3b7-62d86aa6380d - -## Unified Data Analytics Accreditation - -- Issuer: Databricks -- Issued: May 2021 -- Credential ID: afba5402-b5e4-3f9e-95f2-51d6bbb5fa64 - -## ML Pipelines on Google Cloud - -- Issuer: Google -- Issued: Mar 2021 -- Credential ID: FN5PYWX5PRCP - -## Introduction to Trading, Machine Learning & GCP - -- Issuer: Google -- Issued: Nov 2020 -- Credential ID: YV9H5PF4YPLZ - -## MLOps (Machine Learning Operations) Fundamentals - -- Issuer: Google -- Issued: Nov 2020 -- Credential ID: 4BDA24UL7K9Z - -## Machine Learning for Trading Specialization - -- Issuer: Google -- Issued: Nov 2020 -- Credential ID: YSNPABSMV6JL - -## Reinforcement Learning for Trading Strategies - -- Issuer: Google -- Issued: Nov 2020 -- Credential ID: VHKJLFPLLDLU - -## Using Machine Learning in Trading and Finance - -- Issuer: Google -- Issued: Nov 2020 -- Credential ID: X5YYLBMPY4BU - -## DeepLearning.AI 
TensorFlow Developer Specialization - -- Issuer: DeepLearning.AI -- Issued: Oct 2020 -- Credential ID: LQ4GHWJ6URBS - -## Perform Foundational Data, ML, and AI Tasks in Google Cloud - -- Issuer: Google -- Issued: Oct 2020 - -## Professional Machine Learning Engineer - -- Issuer: Google -- Issued: Oct 2020 -- Credential ID: 24896478 - -## Sequences, Time Series and Prediction - -- Issuer: Google -- Issued: Oct 2020 -- Credential ID: WHBV68C4WJT5 - -## Convolutional Neural Networks in TensorFlow - -- Issuer: Google -- Issued: Sep 2020 -- Credential ID: 78HJEJZ3T2BB - -## Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning - -- Issuer: Google -- Issued: Sep 2020 -- Credential ID: SW885ZMDHTYM - -## Natural Language Processing in TensorFlow - -- Issuer: Google -- Issued: Sep 2020 -- Credential ID: JZ9TBHXJFLWM - -## Advanced Machine Learning with TensorFlow on Google Cloud Platform Specialization - -- Issuer: Google -- Issued: Jul 2020 -- Credential ID: V492QQ4JJKEB - -## End-to-End Machine Learning with TensorFlow on GCP - -- Issuer: Google -- Issued: Jul 2020 -- Credential ID: QLDMNADDBSRR - -## Image Understanding with TensorFlow on GCP - -- Issuer: Google -- Issued: Jul 2020 -- Credential ID: HY4HSSY8JSPN - -## Production Machine Learning Systems - -- Issuer: Google -- Issued: Jul 2020 -- Credential ID: THZZNW22LHKT - -## Recommendation Systems with TensorFlow on GCP - -- Issuer: Google -- Issued: Jul 2020 -- Credential ID: 2D4LT28697TC - -## Sequence Models for Time Series and Natural Language Processing - -- Issuer: Google -- Issued: Jul 2020 -- Credential ID: 6XUV7YJFM3ZA - -## Building Batch Data Pipelines on GCP - -- Issuer: Google -- Issued: May 2020 -- Credential ID: 5QYSK9E5EAFN - -## Building Resilient Streaming Analytics Systems on GCP - -- Issuer: Google -- Issued: May 2020 -- Credential ID: FYQW7D4F6PD4 - -## Data Engineering with Google Cloud Specialization - -- Issuer: Google -- Issued: May 2020 -- Credential ID: EPZ3WQFC423E - -## Modernizing Data Lakes and Data Warehouses with GCP - -- Issuer: Google -- Issued: May 2020 -- Credential ID: 393P3HLZWY8H - -## Smart Analytics, Machine Learning, and AI on GCP - -- Issuer: Google -- Issued: May 2020 -- Credential ID: AK77VUVN4ARJ - -## Google Cloud Platform Big Data and Machine Learning Fundamentals - -- Issuer: Google -- Issued: Apr 2020 -- Credential ID: 2Q35NYHYMW5E - -## Devenez Mentor Evaluateur - -- Issuer: OpenClassrooms -- Issued: Feb 2019 -- Credential ID: 8151214336 - -## Advanced AI: Deep Reinforcement Learning in Python - -- Issuer: Udemy -- Issued: Aug 2018 -- Credential ID: UC-5FM0CC9S - -## Artificial Intelligence: Reinforcement Learning in Python - -- Issuer: Udemy -- Issued: Jul 2018 -- Credential ID: UC-XALJEH7G - -## Concevez un site avec Flask - -- Issuer: OpenClassrooms -- Issued: Jul 2018 -- Credential ID: 5343531703 - -## Les étapes de la vie du Mentor - -- Issuer: OpenClassrooms -- Issued: Jul 2018 -- Credential ID: 8431716200 - -## Devenez Mentor chez OpenClassrooms - -- Issuer: OpenClassrooms -- Issued: May 2018 -- Credential ID: 6193593386 - -## Complete Guide to ElasticSearch - -- Issuer: Udemy -- Issued: Mar 2018 -- Credential ID: UC-H5AJQVA3 - -## Introduction to Hadoop - -- Issuer: The Linux Foundation -- Issued: Oct 2017 -- Credential ID: ad676a8fe7994edea33516b80b540971 - -## Artificial Intelligence Nanodegree - -- Issuer: Udacity -- Issued: Sep 2017 -- Credential ID: PV7A7EAA - -## High Performance Computing - -- Issuer: University of Luxembourg -- Issued: 
Feb 2017 - -## Machine Learning - -- Issuer: Stanford University -- Issued: Sep 2015 -- Grade: 97% - -## TOEIC - -- Skills: Listening, Reading -- Issued: Jan 2014 -- Score: 975/990 - -# Publications - -## Is AI/ML Monitoring just Data Engineering? 🤔 - -- Publisher: MLOps Community -- Publication date: July 24, 2023 -- Link: https://mlops.community/is-ai-ml-monitoring-just-data-engineering-%f0%9f%a4%94/ - -While the future of machine learning and MLOps is being debated, -practitioners still need to attend to their machine learning models -in production. This is no easy task, as ML engineers must constantly -assess the quality of the data that enters and exits their -pipelines, and ensure that their models generate the correct -predictions. To assist ML engineers with this challenge, several -AI/ML monitoring solutions have been developed. -In this article, I will discuss the nature of AI/ML monitoring and -how it relates to data engineering. First, I will present the -similarities between AI/ML monitoring and data engineering. Second, -I will enumerate additional features that AI/ML monitoring solutions -can provide. Third, I will briefly touch on the topic of AI/ML -observability and its relation to AI/ML monitoring. Finally, I will -provide my conclusion about the field of AI/ML monitoring and how it -should be considered to ensure the success of your AI/ML project. - -## A great MLOps project should start with a good Python Package 🐍 - -- Publisher: MLOps Community -- Publication date: June 28, 2023 -- Link: https://mlops.community/a-great-mlops-project-should-start-with-a-good-python-package-%f0%9f%90%8d/ - -In this article, I present the implementation of a Python package on -GitHub designed to support MLOps initiatives. The goal of this -package is to make the coding workflow of data scientists and ML -engineers as flexible, robust, and productive as possible. First, I -start by motivating the use of Python packages. Then, I provide some -tools and tips you can include in your MLOps project. Finally, I -explain the follow-up steps required to take this package to the -next level and make it work in your environment. - -## Fixing the MLOps Survey on LLMs with ChatGPT API: Lessons Learned - -- Publisher: MLOps Community -- Publication date: May 11, 2023 -- Link: https://mlops.community/fixing-the-mlops-survey-on-llms-with-chatgpt-api-lessons-learned/ - -Large Language Models (LLMs) are such an exciting topic. Since the -release of ChatGPT, we have seen a surge of innovation ranging from -education mentorship to finance advisory. Each week is a new -opportunity for addressing new kinds of problems, increasing human -productivity, or improving existing solutions. Yet, we may wonder if -this is just a new hype cycle or if organizations are truly adopting -LLMs at scale … -In March 2023, the MLOps Community issued a survey about LLMs in -production to picture the state of adoption. The survey is full of -interesting insights, but there is a catch: 80% of the questions are -open-ended, which means respondents answered the survey freely, from -a few keywords to full sentences. I volunteered to clean up the -answers with the help of ChatGPT and help the community get a grasp -of the survey responses. -In this article, I present the steps and lessons learned from my -journey to shed some light on the MLOps survey on LLMs. I’m first -going to present the goal and questions of the survey. Then, I will -explain how I used ChatGPT to review the data and standardize the -content.
Finally, I’m going to evaluate the performance of ChatGPT -compared to a manual review. - -## Kubeflow: The Machine Learning Toolkit for Kubernetes - -- Publisher: MLOps Community in Luxembourg -- Publication date: April 26, 2023 -- Link: https://docs.google.com/presentation/d/1WtN5mNAHsz7PiWE5kKZUQfQ9_zO_kfVoSBNTHQM-sMk/edit#slide=id.p1 - -## MLflow: An open source platform for the machine learning lifecycle - -- Publisher: MLOps Community in Luxembourg -- Publication date: April 26, 2023 -- Link: https://docs.google.com/presentation/d/1fjoQ2DnTzhyaMI9dC6E2_RIKylW8hAGS5iQ6TapZfZo/edit#slide=id.g1e1b3ce30cd_0_0 - -## We need POSIX for MLOps - -- Publisher: MLOps Community -- Publication date: April 21, 2023 -- Link: https://mlops.community/we-need-posix-for-mlops/ - -If you work on MLOps, you must navigate an ever-growing landscape of -tools and solutions. This is both an intense source of stimulation -and fatigue for MLOps practitioners. -Vendors and users face the same problem: How can we combine all -these tools without the combinatorial complexity of creating custom -integrations? -In this article, I propose a solution analogous to POSIX to address -this challenge. First, I motivate the creation of common protocols -and schemas for combining MLOps tools. Second, I present a -high-level architecture to support implementation. Third, I conclude -with the benefits and limitations of standardizing MLOps. - -## How to install Kubeflow Pipelines v2 on Apple Silicon - -- Publisher: Medium -- Publication date: September 24, 2022 -- Link: https://fmind.medium.com/how-to-install-kubeflow-on-apple-silicon-3565db8773f3 - -Kubeflow Pipelines (KFP) is a powerful platform for building machine -learning pipelines at scale with Kubernetes. The platform is well -supported on major cloud platforms such as GCP (Vertex AI Pipelines) -or AWS (Kubeflow on AWS). However, installing KFP on Apple Silicon -(macOS 12.5.1 with Apple M1 Pro) proved to be more challenging than -I imagined. Thus, I wanted to share my experience and tips to -install KFP as easily as possible on your shiny Mac. -In this article, I present 4 steps to install Kubeflow on Apple -Silicon, using Rancher Desktop for setting up Docker/Kubernetes. In -the end, I list the problems I encountered during the installation -of Kubeflow Pipelines. - -## The Programming Trade-Off: Purpose, Productivity, Performance - -- Publisher: Medium -- Publication date: August 15, 2019 -- Link: https://fmind.medium.com/3p-principle-purpose-productivity-performance-630bed7623fc - -As programmers, we are continuously looking for languages that are -performant, productive, and general purpose. Is there any -programming language that currently satisfies these properties? Can -we ever create one? -In this article, I present a fundamental trade-off that affects the -design of programming languages and the success of software -projects. - -## Creating better ground truth to further understand Android malware: A large scale mining approach based on antivirus labels and malicious artifacts - -- Publisher: University of Luxembourg -- Publication date: July 1, 2019 -- Link: https://orbilu.uni.lu/handle/10993/39903 - -Mobile applications are essential for interacting with technology -and other people. With more than 2 billion devices deployed all over -the world, Android offers a thriving ecosystem by making accessible -the work of thousands of developers on digital marketplaces such as -Google Play.
Nevertheless, the success of Android also exposes -millions of users to malware authors who seek to siphon private -information and hijack mobile devices for their benefit. -To fight against the proliferation of Android malware, the security -community embraced machine learning, a branch of artificial -intelligence that powers a new generation of detection systems. -Machine learning algorithms, however, require a substantial number -of qualified samples to learn the classification rules enforced by -security experts. Unfortunately, malware ground truths are -notoriously hard to construct due to the inherent complexity of -Android applications and the global lack of public information about -malware. In a context where both information and human resources are -limited, the security community is in need of new approaches to -help practitioners accurately define Android malware, automate -classification decisions, and improve the comprehension of Android -malware. -This dissertation proposes three solutions to assist with the -creation of malware ground truths. - -## Euphony: Harmonious Unification of Cacophonous Anti-Virus Vendor Labels for Android Malware - -- Publisher: MSR 2017 -- Publication date: May 21, 2017 -- Link: https://orbilu.uni.lu/handle/10993/31441 - -Android malware is now pervasive and evolving rapidly. Thousands of -malware samples are discovered every day with new attack models. -The growth of these threats has come hand in hand with the -proliferation of collective repositories sharing the latest -specimens. Having access to a large number of samples opens new -research directions aiming at efficiently vetting apps. However, -automatically inferring a reference ground-truth from those -repositories is not straightforward and can inadvertently lead to -unforeseen misconceptions. On the one hand, samples are often -mislabeled as different parties use distinct naming schemes for the -same sample. On the other hand, samples are frequently misclassified -due to conceptual errors made during labeling processes. -In this paper, we analyze the associations between all labels given -by different vendors and we propose a system called EUPHONY to -systematically unify common samples into family groups. The key -novelty of our approach is that no prior knowledge of malware -families is needed. We evaluate our approach using reference -datasets and more than 0.4 million additional samples outside of -these datasets. Results show that EUPHONY provides competitive -performance against the state-of-the-art. - -## On the Lack of Consensus in Anti-Virus Decisions: Metrics and Insights on Building Ground Truths of Android Malware - -- Publisher: DIMVA 2016 -- Publication date: July 7, 2016 -- Link: https://orbilu.uni.lu/handle/10993/27845 - -There is generally a lack of consensus in Antivirus (AV) engines' -decisions on a given sample. This challenges the building of -authoritative ground-truth datasets. Instead, researchers and -practitioners may rely on unvalidated approaches to build their -ground truth, e.g., by considering decisions from a selected set of -Antivirus vendors or by setting up a threshold number of positive -detections before classifying a sample. Both approaches are biased -as they implicitly either decide on ranking AV products, or they -consider that all AV decisions have equal weights. In this paper, we -extensively investigate the lack of agreement among AV engines.
-To that end, we propose a set of metrics that quantitatively -describe the different dimensions of this lack of consensus. We show -how our metrics can bring important insights by using the detection -results of 66 AV products on 2 million Android apps as a case study. -Our analysis focuses not only on AV binary decisions but also on the -notoriously hard problem of labels that AVs associate with -suspicious files, and allows us to highlight biases hidden in the -collection of a malware ground truth---a foundation stone of any -machine learning-based malware detection approach. - -# Projects - -## MLOps Python Package - -- Date: Jun 2023 - Jun 2023 -- Description: Kickstart your MLOps initiative with a flexible, robust, and productive Python package. -- Link: https://github.com/fmind/mlops-python-package - -## Fixing the MLOps Survey with ChatGPT - -- Date: May 2023 - May 2023 -- Description: Fixing the MLOps Survey on LLMs with ChatGPT API. -- Link: https://fmind.medium.com/fixing-the-mlops-survey-on-llms-with-chatgpt-api-lessons-learned-62d90e721331 - -## Kubeflow Demo - -- Date: Apr 2023 - Apr 2023 -- Description: Kubeflow demo for the MLOps Community Meetup in Luxembourg. -- Link: https://github.com/fmind/kubeflow-demo - -## MLflow Demo - -- Date: Apr 2023 - Apr 2023 -- Description: MLflow demo for the MLOps Community Meetup in Luxembourg. -- Link: https://github.com/fmind/mlflow-demo - -## onet - -- Date: Aug 2020 - Sep 2020 -- Description: Training and prediction procedures of a DNN for binary image classification -- Link: https://github.com/fmind/onet - -## fincrawl - -- Date: Nov 2019 - Dec 2019 -- Description: Crawl documents, metadata, and files from financial institutions -- Link: https://github.com/fmind/fincrawl - -## invest - -- Date: Aug 2019 - Sep 2019 -- Description: Stock market analysis focused on dividends -- Link: https://github.com/fmind/invest - -## parsoc - -- Date: Jul 2019 - Sep 2019 -- Description: Convert docx files to json -- Link: https://github.com/fmind/parsoc - -## Bigdata Tutorials - -- Date: Sep 2015 - Jul 2019 -- Description: Tutorials for the Big Data course @ uni.lu -- Link: https://github.com/fmind/bigdata-tutorials - -## STASE: A set of statistical metrics to better understand and qualify malware datasets - -- Date: Apr 2016 - Jul 2019 -- Description: A handful of statistical metrics to better understand and qualify malware datasets -- Link: https://github.com/fmind/STASE - -## apkworkers - -- Date: Sep 2015 - Jul 2019 -- Description: A Celery application to distribute Android malware analysis -- Link: https://github.com/fmind/apkworkers - -## servalx - -- Date: Sep 2015 - Jul 2019 -- Description: A set of tools and modules to process Android malware with Androzoo -- Link: https://github.com/fmind/servalx - -## Euphony: Harmonious Unification of Cacophonous Anti-Virus Vendor Labels for Android Malware - -- Date: Mar 2017 - Mar 2019 -- Description: Harmonious Unification of Cacophonous Anti-Virus Vendor Labels for Android Malware -- Link: https://github.com/fmind/euphony - -## Automatic Speech Recognition with Tensorflow - -- Date: Sep 2017 - Sep 2017 -- Description: An automatic speech-recognition system based on Tensorflow -- Link: https://github.com/fmind/AIND-VUI-Capstone - -## Dog Recognition with Tensorflow - -- Date: Aug 2017 - Aug 2017 -- Description: A machine-learning model trained to recognize dogs, even from human faces -- Link: https://github.com/fmind/dog-project - -## genius - -- Date: Jun 2017 - Jul 2017 -- Description: An implementation of LISP
Scheme written in Haskell -- Link: https://github.com/fmind/genius - -## Alexa History Skill - -- Date: Jun 2017 - Jun 2017 -- Description: An Alexa skill that provides year-dated facts on demand -- Link: https://github.com/fmind/AIND-VUI-Alexa - -## Air Cargo Planning System - -- Date: Feb 2017 - Apr 2017 -- Description: An automated Air Cargo transport system based on AI planning -- Link: https://github.com/fmind/AIND-Planning - -## Sign Language Recognition System - -- Date: Feb 2017 - Apr 2017 -- Description: A sign recognition system based on a Hidden Markov Model -- Link: https://github.com/fmind/AIND-Recognizer - -## AI Agent for the Isolation Game - -- Date: Mar 2017 - Mar 2017 -- Description: An AI game agent to play the Isolation game -- Link: https://github.com/fmind/AIND-Isolation - -## Sudoku Solver - -- Date: Jan 2017 - Feb 2017 -- Description: A Diagonal Sudoku solver implemented with Python -- Link: https://github.com/fmind/AIND-Sudoku - -## lkml - -- Date: Nov 2016 - Jan 2017 -- Description: Gather emails from https://lkml.org/ -- Link: https://github.com/fmind/lkml - -## Master 2 School Projects - -- Date: Sep 2013 - Jun 2014 -- Description: School projects from 2013 to 2014 - Master 2 Sécurité des Systèmes d'Information (Metz) -- Link: https://github.com/fmind/master2-projects - -## chattail - -- Date: Dec 2013 - Mar 2014 -- Description: Send log streams over XMPP to monitor your systems -- Link: https://github.com/fmind/chattail - -## Master 1 School Projects - -- Date: Jun 2012 - Sep 2013 -- Link: https://github.com/fmind/master1-projects - -## Bachelor School Projects - -- Date: Jun 2011 - Sep 2012 -- Link: https://github.com/fmind/bachelor-projects - -## Professional Bachelor School Project - -- Date: Sep 2009 - Jun 2010 -- Link: https://github.com/fmind/professional-bachelor-project - -# Skills - -## Artificial Intelligence / Machine Learning - -- Artificial Intelligence (AI) -- Machine Learning -- MLOps -- Deep Learning -- Data Science -- Statistics -- ChatGPT -- Scikit-Learn -- Tensorflow -- KubeFlow -- MLflow -- Jupyter -- Pandas -- Keras -- DVC -- Transformers -- Natural Language Processing (NLP) - -## Software Engineering - -- Python -- Functional Programming (FP) -- Object-Oriented Programming (OOP) -- REST API -- Android -- Docker -- JSON -- Git - -## Cloud Platforms - -- AWS SageMaker -- Vertex AI (GCP) -- Google Cloud Platform (GCP) -- Microsoft Azure Machine Learning -- Apache Airflow -- Kubernetes -- DataBricks -- Terraform -- Ansible -- Linux - -## Computer Security - -- ISO 27001 -- Cybersecurity - -## Data Management - -- NoSQL -- MongoDB -- Big Data -- PostgreSQL -- Apache Spark -- ElasticSearch -- Data Engineering - -## Project Management - -- Agile Methodology -- Project Management -- Jira -- UML - -# Languages - -## French - -- Proficiency: Native or bilingual proficiency - -## English - -- Proficiency: Full professional proficiency diff --git a/spaces/gabibi7am/rvc-models/infer_pack/models_onnx.py b/spaces/gabibi7am/rvc-models/infer_pack/models_onnx.py deleted file mode 100644 index 3cdae2f7f8591a1e43b1d8520baa37b7e9744d72..0000000000000000000000000000000000000000 --- a/spaces/gabibi7am/rvc-models/infer_pack/models_onnx.py +++ /dev/null @@ -1,849 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn
import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not 
reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic 
overtones (default 0) - sine_amp: amplitude of sine waveform (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_threshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SineGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate the voiced/unvoiced (uv) mask: 1 where f0 exceeds the voiced threshold - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 # % 1: keep only the fractional phase; the n_har harmonic products cannot be optimized in post-processing - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 # applying % 1 here would prevent the cumsum below from being optimized - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonics above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threshold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length, 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, -
add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - 
hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - 
inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - # note: unlike SynthesizerTrnMs256NSFsid, this class defines no enc_q, so only dec and flow carry weight norm - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y (the spectrogram) is no longer needed here - g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis t, broadcast - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = spectral_norm if use_spectral_norm else weight_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = spectral_norm if use_spectral_norm else weight_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), -
(stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/giulio98/codebleu/parsercode/build.py b/spaces/giulio98/codebleu/parsercode/build.py deleted file mode 100644 index 6cf4271ac1cc906202ad0a56c27b0de1e671d73e..0000000000000000000000000000000000000000 --- a/spaces/giulio98/codebleu/parsercode/build.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. - -from tree_sitter import Language, Parser - -Language.build_library( - # Store the library in the `build` directory - 'my-languages.so', - - # Include one or more languages - [ - 'tree-sitter-python', - 'tree-sitter-cpp' - ] -) - diff --git a/spaces/gotiQspiryo/whisper-ui/examples/How to Download Final Fantasy XIV for Mac and Enjoy the Free Trial.md b/spaces/gotiQspiryo/whisper-ui/examples/How to Download Final Fantasy XIV for Mac and Enjoy the Free Trial.md deleted file mode 100644 index 3633dcc5d34412680ebce9e6e3a9d5f0acd42fec..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/How to Download Final Fantasy XIV for Mac and Enjoy the Free Trial.md +++ /dev/null @@ -1,12 +0,0 @@ -
              -

Customers who have already purchased these titles will still be able to play or re-download them, but please be aware that we may be unable to guarantee correct operation if the environment they are used on changes.

              -

The newest expansion, Final Fantasy XIV Endwalker, will be out on December 3, 2021, and several players are encountering the "Unable to download patch files" error, which blocks the update. Here's how you can try to fix it so you can enjoy FF14 Endwalker at launch.

              -

              The "unable to download patch files" error is a pretty common problem with FF14 and a recurrent issue happening with each update. Players who've encountered this issue in the past regularly mentioned it's a firewall issue or a permissions issue. So here are the methods you could try to fix the problem.

              -

To disable the Windows firewall, go to Control Panel, System and Security, and then Windows Firewall. Click "Turn off Windows Firewall (not recommended)" for both private and public network settings, then try downloading the FF14 Endwalker update again. Once you're done, be sure to reactivate the firewall.
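If you prefer to script this instead of clicking through the Control Panel, here is a minimal sketch that toggles the firewall with the built-in netsh tool. It must run from an elevated (Administrator) prompt, and the helper name is just for illustration.

```python
import subprocess

def set_windows_firewall(enabled: bool) -> None:
    """Toggle Windows Firewall for all profiles via the built-in netsh tool."""
    state = "on" if enabled else "off"
    subprocess.run(
        ["netsh", "advfirewall", "set", "allprofiles", "state", state],
        check=True,  # raise if netsh fails (e.g. not running as Administrator)
    )

set_windows_firewall(False)   # try the patch download, then...
# set_windows_firewall(True)  # ...re-enable the firewall afterwards
```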

              -

You can also clear the cached patch data by deleting the "4e9a232b" folder of FF14. You can find it in Users, <the name of the user you're logged on with on Windows>, Documents, My Games, Final Fantasy XIV A Realm Reborn, Downloads, and then Patch. In there, delete the folder "4e9a232b", then try downloading the update again.
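For convenience, here is a small Python sketch of that cleanup. The path mirrors the folder names above, but treat the exact casing and location as assumptions and double-check them on your machine.

```python
import shutil
from pathlib import Path

# Default location of the cached patch folder described above (adjust if
# your Documents folder lives elsewhere, e.g. under OneDrive).
patch_dir = (
    Path.home() / "Documents" / "My Games"
    / "FINAL FANTASY XIV - A Realm Reborn" / "Downloads" / "Patch" / "4e9a232b"
)

if patch_dir.exists():
    shutil.rmtree(patch_dir)  # remove the cached patch data
    print(f"Removed {patch_dir}; restart the launcher and retry the download.")
else:
    print(f"{patch_dir} not found; nothing to delete.")
```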

              -

              Disclosures: This game was obtained via paid digital download and reviewed on the PC. Approximately 80 hours of play was devoted to multiplayer modes, and there is no offline option available.

              -

              In the Official MMORPG.com Review, our own Adam Tingle takes a look at the tank simulation MMO that's taking the world by storm and divulges whether you should even bother with the download. The short answer? Yes. Read on!

              -

              -
              -
\ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/fast_noisy_channel/README.md b/spaces/gradio/HuBERT/examples/fast_noisy_channel/README.md deleted file mode 100644 index a04151a796e4e092fa3c803a1679ab521af96aeb..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/fast_noisy_channel/README.md +++ /dev/null @@ -1,345 +0,0 @@ -# Language Models not just for Pre-training: Fast Online Neural Noisy Channel Modeling - -## Introduction -- [Yee et al. (2019)](https://www.aclweb.org/anthology/D19-1571.pdf) introduce a simple and effective noisy channel modeling approach for neural machine translation. However, the noisy channel online decoding approach introduced in this paper is too slow to be practical. -- To address this, [Bhosale et al. (2020)](http://www.statmt.org/wmt20/pdf/2020.wmt-1.68.pdf) introduce three simple approximations to make this approach very fast and practical without much loss in accuracy. -- This README provides instructions on how to run online decoding or generation with the noisy channel modeling approach, including ways to make it very fast without much loss in accuracy. - -## Noisy Channel Modeling - -[Yee et al. (2019)](https://www.aclweb.org/anthology/D19-1571.pdf) apply Bayes' rule to predict `P(y|x)`, the probability of the target `y` given the source `x`. -```P(y|x) = P(x|y) * P(y) / P(x)``` -- `P(x|y)` predicts the source `x` given the target `y` and is referred to as the **channel model** -- `P(y)` is a **language model** over the target `y` -- `P(x)` is generally not modeled since it is constant for all `y`. - -We use Transformer models to parameterize the direct model `P(y|x)`, the channel model `P(x|y)` and the language model `P(y)`. - -During online decoding with beam search, we generate the top `K2` candidates per beam and score them with the following linear combination of the channel model, the language model as well as the direct model scores. - -```(1 / t) * log(P(y|x)) + (1 / s) * ( λ1 * log(P(x|y)) + λ2 * log(P(y)) )``` -- `t` - Target Prefix Length -- `s` - Source Length -- `λ1` - Channel Model Weight -- `λ2` - Language Model Weight - -The top `beam_size` candidates based on the above combined scores are chosen to continue the beams in beam search. In beam search with a direct model alone, the scores from the direct model `P(y|x)` are used to choose the top candidates in beam search. A short Python sketch of this scoring rule is included just before the first generation command below. - -This framework provides a great way to utilize strong target language models trained on large amounts of unlabeled data. Language models can prefer targets unrelated to the source, so we also need a channel model whose role is to ensure that the target preferred by the language model also translates back to the source. - -### Training Translation Models and Language Models - -For training Transformer models in fairseq for machine translation, refer to instructions [here](https://github.com/pytorch/fairseq/tree/master/examples/translation) - -For training Transformer models in fairseq for language modeling, refer to instructions [here](https://github.com/pytorch/fairseq/tree/master/examples/language_model) - -### Generation with Language Model for German-English translation with fairseq - -Here are instructions to generate using a direct model and a target-side language model.
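Before diving into the commands, here is a minimal Python sketch of the combined scoring rule from the section above. It is an illustration only: the tensor names and shapes are hypothetical, and `ch_wt`/`lm_wt` play the roles of λ1/λ2 (they map to the `--ch-wt` and `--lm-wt` flags used below).

```python
import torch

def noisy_channel_score(
    lprob_direct: torch.Tensor,   # log P(y|x) per candidate, summed over the target prefix
    lprob_channel: torch.Tensor,  # log P(x|y): channel model score of the source given the prefix
    lprob_lm: torch.Tensor,       # log P(y): language model score of the target prefix
    tgt_prefix_len: int,          # t in the formula above
    src_len: int,                 # s in the formula above
    ch_wt: float,                 # λ1, the channel model weight (--ch-wt)
    lm_wt: float,                 # λ2, the language model weight (--lm-wt)
) -> torch.Tensor:
    """Combined score used to rank the top k2 candidates of each beam."""
    return (1.0 / tgt_prefix_len) * lprob_direct + (1.0 / src_len) * (
        ch_wt * lprob_channel + lm_wt * lprob_lm
    )

# e.g. scores for k2 = 10 candidate continuations of one beam:
scores = noisy_channel_score(
    torch.randn(10), torch.randn(10), torch.randn(10),
    tgt_prefix_len=4, src_len=17, ch_wt=0.30, lm_wt=0.50,
)
best = scores.topk(5).indices  # keep beam_size candidates
```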
- - Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt - -k2=10 -lenpen=0.16 -lm_wt=0.14 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --k2 ${k2} \ - --combine-method lm_only \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --gen-subset valid \ - --remove-bpe \ - --fp16 \ - --batch-size 10 -``` -### Noisy Channel Generation for German-English translation with fairseq - -Here are instructions for noisy channel generation with a direct model, channel model and language model as explained in section [Noisy Channel Modeling](#noisy-channel-modeling). - -Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -ch_model=en_de.big.seed4.pt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed4.pt -O ${ch_model} - -k2=10 -lenpen=0.21 -lm_wt=0.50 -bw_wt=0.30 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --channel-model ${ch_model} \ - --k2 ${k2} \ - --combine-method noisy_channel \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --ch-wt ${bw_wt} \ - --gen-subset test \ - --remove-bpe \ - --fp16 \ - --batch-size 1 -``` -## Fast Noisy Channel Modeling - -[Bhosale et al. (2020)](http://www.statmt.org/wmt20/pdf/2020.wmt-1.68.pdf) introduce three approximations that speed up online noisy channel decoding - -- Smaller channel models (`Transformer Base` with 1 encoder and decoder layer each vs. `Transformer Big`) - - This involves training a channel model that is possibly smaller and less accurate in terms of BLEU than a channel model of the same size as the direct model. - - Since the role of the channel model is mainly to assign low scores to generations from the language model if they don't translate back to the source, we may not need the most accurate channel model for this purpose. -- Smaller output vocabulary size for the channel model (~30,000 -> ~1000) - - The channel model doesn't need to score the full output vocabulary; it just needs to score the source tokens, which are completely known.
- - This is specified using the arguments `--channel-scoring-type src_vocab --top-k-vocab 500` - - This means that the output vocabulary for the channel model will be the source tokens for all examples in the batch and the top-K most frequent tokens in the vocabulary - - This reduces the memory consumption needed to store channel model scores significantly -- Smaller number of candidates (`k2`) scored per beam - - This is specified by reducing the argument `--k2` - - -### Fast Noisy Channel Generation for German-English translation with fairseq - -Here are instructions for **fast** noisy channel generation with a direct model, channel model and language model as explained in section [Fast Noisy Channel Modeling](#fast-noisy-channel-modeling). The main differences are that we use a smaller channel model, reduce `--k2`, set `--channel-scoring-type src_vocab --top-k-vocab 500` and increase the `--batch-size`. - -Note: -- Download and install fairseq as per instructions [here](https://github.com/pytorch/fairseq) -- Preprocess and binarize the dataset as per instructions in section [Test Data Preprocessing](#test-data-preprocessing) - -```sh -binarized_data=data_dir/binarized -direct_model=de_en_seed4.pt -lm_model=en_lm.pt -lm_data=lm_data -small_ch_model=en_de.base_1_1.seed4.pt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt -O ${direct_model} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt -O ${lm_model} -mkdir -p ${lm_data} -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/dict.txt -O ${lm_data}/dict.txt -wget https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed4.pt -O ${small_ch_model} - -k2=3 -lenpen=0.23 -lm_wt=0.58 -bw_wt=0.26 -fairseq-generate ${binarized_data} \ - --user-dir examples/fast_noisy_channel \ - --beam 5 \ - --path ${direct_model} \ - --lm-model ${lm_model} \ - --lm-data ${lm_data} \ - --channel-model ${small_ch_model} \ - --k2 ${k2} \ - --combine-method noisy_channel \ - --task noisy_channel_translation \ - --lenpen ${lenpen} \ - --lm-wt ${lm_wt} \ - --ch-wt ${bw_wt} \ - --gen-subset test \ - --remove-bpe \ - --fp16 \ - --batch-size 50 \ - --channel-scoring-type src_vocab --top-k-vocab 500 -``` - -## Test Data Preprocessing - -For preprocessing and binarizing the test sets for Romanian-English and German-English translation, we use the following script - - -```sh -FAIRSEQ=/path/to/fairseq -cd $FAIRSEQ -SCRIPTS=$FAIRSEQ/mosesdecoder/scripts -if [ ! -d "${SCRIPTS}" ]; then - echo 'Cloning Moses github repository (for tokenization scripts)...' 
-  git clone https://github.com/moses-smt/mosesdecoder.git
-fi
-TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
-NORMALIZE=$SCRIPTS/tokenizer/normalize-punctuation.perl
-
-s=de
-t=en
-test=wmt18
-
-mkdir -p data_dir
-
-# Tokenization
-if [ $s == "ro" ] ; then
-  # Note: Get normalise-romanian.py and remove-diacritics.py from
-  # https://github.com/rsennrich/wmt16-scripts/tree/master/preprocess
-  sacrebleu -t $test -l $s-$t --echo src | \
-    $NORMALIZE -l $s | \
-    python normalise-romanian.py | \
-    python remove-diacritics.py | \
-    $TOKENIZER -l $s -a -q > data_dir/$test.$s-$t.$s
-else
-  sacrebleu -t $test -l $s-$t --echo src | perl $NORMALIZE -l $s | perl $TOKENIZER -threads 8 -a -l $s > data_dir/$test.$s-$t.$s
-fi
-
-sacrebleu -t $test -l $s-$t --echo ref | perl $NORMALIZE -l $t | perl $TOKENIZER -threads 8 -a -l $t > data_dir/$test.$s-$t.$t
-
-
-# Applying BPE
-src_bpe_code=/path/to/source/language/bpe/code
-tgt_bpe_code=/path/to/target/language/bpe/code
-src_dict=/path/to/source/language/dict
-tgt_dict=/path/to/target/language/dict
-
-FASTBPE=$FAIRSEQ/fastBPE
-if [ ! -d "${FASTBPE}" ] ; then
-  git clone https://github.com/glample/fastBPE.git
-  # Follow compilation instructions at https://github.com/glample/fastBPE
-  g++ -std=c++11 -pthread -O3 fastBPE/main.cc -IfastBPE -o fast
-fi
-
-${FASTBPE}/fast applybpe data_dir/bpe.$test.$s-$t.$s data_dir/$test.$s-$t.$s ${src_bpe_code}
-${FASTBPE}/fast applybpe data_dir/bpe.$test.$s-$t.$t data_dir/$test.$s-$t.$t ${tgt_bpe_code}
-
-fairseq-preprocess -s $s -t $t \
-    --testpref data_dir/bpe.$test.$s-$t \
-    --destdir data_dir/binarized \
-    --srcdict ${src_dict} \
-    --tgtdict ${tgt_dict}
-```
-
-## Calculating BLEU
-
-```sh
-DETOKENIZER=$SCRIPTS/tokenizer/detokenizer.perl
-cat ${generation_output} | grep -P "^H" | sort -V | cut -f 3- | $DETOKENIZER -l $t -q -a | sacrebleu -t $test -l $s-$t
-```
-
-
-## Romanian-English Translation
-
-The direct and channel models are trained on bitext data (WMT16) combined with backtranslated data. The monolingual data used for backtranslation comes from http://data.statmt.org/rsennrich/wmt16_backtranslations/ (Sennrich et al., 2016c).
-
-The backtranslated data is generated using an ensemble of 3 English-Romanian models trained on the bitext training data (WMT16) with unrestricted sampling.
-
-### BPE Codes and Dictionary
-
-We learn a joint BPE vocabulary of 18K types on the bitext training data, which is used for both the source and target.
-| | Path |
-|----------|------|
-| BPE Code | [joint_bpe_18k](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/bpe_18k) |
-| Dictionary | [dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/dict) |
-
-### Direct Models
-For Ro-En with backtranslation, the direct and channel models use a Transformer-Big architecture.
-
-| Seed | Model |
-|----|----|
-| 2 | [ro_en_seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed2.pt) |
-| 4 | [ro_en_seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed4.pt) |
-| 6 | [ro_en_seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/direct_models/seed6.pt) |
-
-### Channel Models
-For the channel models, we follow the same steps as for the direct models, but the backtranslated data is generated in the opposite direction using [this Romanian monolingual data](http://data.statmt.org/rsennrich/wmt16_backtranslations/).
-The best lenpen, LM weight and CH weight are obtained by sweeping over the validation set (wmt16/dev) using beam 5, as sketched below.
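-
-The sweep script itself is not part of this release; the following is a minimal illustrative sketch of such a grid search over the validation set. The grid ranges and output naming are assumptions for illustration, not the values used in the paper, and the variables reuse the De-En generation example above (substitute the corresponding Ro-En checkpoints here):
-
-```sh
-# Hypothetical grid search over lenpen, LM weight and channel weight on the
-# validation set. Assumes binarized_data, direct_model, lm_model, lm_data and
-# ch_model are set as in the generation examples above.
-for lenpen in 0.2 0.4 0.6 0.8 1.0; do
-  for lm_wt in 0.3 0.4 0.5 0.6 0.7; do
-    for ch_wt in 0.3 0.4 0.5 0.6 0.7; do
-      fairseq-generate ${binarized_data} \
-        --user-dir examples/fast_noisy_channel \
-        --beam 5 \
-        --path ${direct_model} \
-        --lm-model ${lm_model} --lm-data ${lm_data} \
-        --channel-model ${ch_model} \
-        --k2 10 \
-        --combine-method noisy_channel \
-        --task noisy_channel_translation \
-        --lenpen ${lenpen} --lm-wt ${lm_wt} --ch-wt ${ch_wt} \
-        --gen-subset valid \
-        --remove-bpe --fp16 --batch-size 1 \
-        > gen.lenpen${lenpen}.lm${lm_wt}.ch${ch_wt}.out
-    done
-  done
-done
-```
-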
-| Model Size | Lenpen | LM Weight | CH Weight | Seed 2 | Seed 4 | Seed 6 |
-|----|----|----|----|----|----|----|
-| `big` | 0.84 | 0.64 | 0.56 | [big.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed2.pt) | [big.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed4.pt) | [big.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/big.seed6.pt) |
-| `base_1_1` | 0.63 | 0.40 | 0.37 | [base_1_1.seed2.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed2.pt) | [base_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed4.pt) | [base_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/channel_models/base_1_1.seed6.pt) |
-
-### Language Model
-The model is trained on de-duplicated English Newscrawl data from 2007-2018, comprising 186 million sentences (4.5B words) after normalization and tokenization.
-| | Path |
-|----|----|
-| `--lm-model` | [transformer_en_lm](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/lm_model/transformer_lm.pt) |
-| `--lm-data` | [lm_data](https://dl.fbaipublicfiles.com/fast_noisy_channel/ro_en/lm_model/lm_dict) |
-
-## German-English Translation
-
-### BPE Codes and Dictionaries
-
-| | Path |
-|----------|------|
-| Source BPE Code | [de_bpe_code_24K](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/de_bpe_code_24K) |
-| Target BPE Code | [en_bpe_code_24K](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/en_bpe_code_24K) |
-| Source Dictionary | [de_dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/de_dict) |
-| Target Dictionary | [en_dict](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/en_dict) |
-
-### Direct Models
-We train on WMT’19 training data. Following [Ng et al., 2019](http://statmt.org/wmt19/pdf/53/WMT33.pdf), we apply language identification filtering and remove sentences longer than 250 tokens, as well as sentence pairs with a source/target length ratio exceeding 1.5. This results in 26.8M sentence pairs.
-We use the Transformer-Big architecture for the direct model.
-
-| Seed | Model |
-|:----:|----|
-| 4 | [de_en_seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed4.pt) |
-| 5 | [de_en_seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed5.pt) |
-| 6 | [de_en_seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/direct_models/seed6.pt) |
-
-### Channel Models
-
-The channel models are trained on the same filtered WMT’19 data (26.8M sentence pairs), using the preprocessing described for the direct models above.
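-
-The filtering scripts themselves are not included here; as a rough sketch (assuming whitespace-tokenized, line-aligned files named `train.de` and `train.en`, which are illustrative names, and leaving out the separate language-identification step), the length and ratio filtering could be expressed as:
-
-```sh
-# Keep pairs where both sides have at most 250 tokens and the source/target
-# length ratio does not exceed 1.5 in either direction.
-paste train.de train.en | awk -F'\t' '{
-  s = split($1, a, " "); t = split($2, b, " ");
-  if (s > 0 && t > 0 && s <= 250 && t <= 250 && s <= 1.5 * t && t <= 1.5 * s)
-    print;
-}' > train.de-en.filtered.tsv
-```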
- -| Model Size | Seed 4 | Seed 5 | Seed 6 | -|----|----|----|----| -| `big` | [big.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed4.pt) | [big.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed5.pt) | [big.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big.seed6.pt) | -| `big_1_1` | [big_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed4.pt) | [big_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed5.pt) | [big_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/big_1_1.seed6.pt) | -| `base` | [base.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed4.pt) | [base.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed5.pt) | [base.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base.seed6.pt) | -| `base_1_1` | [base_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed4.pt) | [base_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed5.pt) | [base_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/base_1_1.seed6.pt) | -| `half` | [half.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed4.pt) | [half.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed5.pt) | [half.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half.seed6.pt) | -| `half_1_1` | [half_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed4.pt) | [half_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed5.pt) | [half_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/half_1_1.seed6.pt) | -| `quarter` | [quarter.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed4.pt) | [quarter.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed5.pt) | [quarter.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter.seed6.pt) | -| `quarter_1_1` | [quarter_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed4.pt) | [quarter_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed5.pt) | [quarter_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/quarter_1_1.seed6.pt) | -| `8th` | [8th.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed4.pt) | [8th.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed5.pt) | [8th.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th.seed6.pt) | -| `8th_1_1` | [8th_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed4.pt) | [8th_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed5.pt) | [8th_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/8th_1_1.seed6.pt) | -| `16th` | 
[16th.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed4.pt) | [16th.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed5.pt) | [16th.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th.seed6.pt) | -| `16th_1_1` | [16th_1_1.seed4.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed4.pt) | [16th_1_1.seed5.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed5.pt) | [16th_1_1.seed6.pt](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/channel_models/16th_1_1.seed6.pt) | - -### Language Model -The model is trained on de-duplicated English Newscrawl data from 2007-2018 comprising 186 million sentences or 4.5B words after normalization and tokenization. -| | Path | -|----|----| -| `--lm-model` | [transformer_en_lm](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/transformer_lm.pt) | -| `--lm-data` | [lm_data](https://dl.fbaipublicfiles.com/fast_noisy_channel/de_en/lm_model/lm_dict/) - - -## Citation - -```bibtex -@inproceedings{bhosale2020language, - title={Language Models not just for Pre-training: Fast Online Neural Noisy Channel Modeling}, - author={Shruti Bhosale and Kyra Yee and Sergey Edunov and Michael Auli}, - booktitle={Proceedings of the Fifth Conference on Machine Translation (WMT)}, - year={2020}, -} - -@inproceedings{yee2019simple, - title={Simple and Effective Noisy Channel Modeling for Neural Machine Translation}, - author={Yee, Kyra and Dauphin, Yann and Auli, Michael}, - booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)}, - pages={5700--5705}, - year={2019} -} -``` diff --git a/spaces/gradio/HuBERT/examples/speech_recognition/new/README.md b/spaces/gradio/HuBERT/examples/speech_recognition/new/README.md deleted file mode 100644 index 5fa0e97245d3ba6db69d11222261b0644960183d..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/speech_recognition/new/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Flashlight Decoder - -This script runs decoding for pre-trained speech recognition models. 
-
-## Usage
-
-Assuming a few variables (the bracketed placeholders below are reconstructed, since the originals were lost in extraction):
-
-```bash
-checkpoint=<path-to-checkpoint>
-data=<path-to-data-directory>
-lm_model=<path-to-language-model>
-lexicon=<path-to-lexicon>
-```
-
-Example usage for decoding a fine-tuned Wav2Vec model:
-
-```bash
-python $FAIRSEQ_ROOT/examples/speech_recognition/new/infer.py --multirun \
-  task=audio_pretraining \
-  task.data=$data \
-  task.labels=ltr \
-  common_eval.path=$checkpoint \
-  decoding.type=kenlm \
-  decoding.lexicon=$lexicon \
-  decoding.lmpath=$lm_model \
-  dataset.gen_subset=dev_clean,dev_other,test_clean,test_other
-```
-
-Example usage for using Ax to sweep WER parameters (requires `pip install hydra-ax-sweeper`):
-
-```bash
-python $FAIRSEQ_ROOT/examples/speech_recognition/new/infer.py --multirun \
-  hydra/sweeper=ax \
-  task=audio_pretraining \
-  task.data=$data \
-  task.labels=ltr \
-  common_eval.path=$checkpoint \
-  decoding.type=kenlm \
-  decoding.lexicon=$lexicon \
-  decoding.lmpath=$lm_model \
-  dataset.gen_subset=dev_other
-```
diff --git a/spaces/gryhkn/free-fast-youtube-url-video-to-text-using-openai-whisper/README.md b/spaces/gryhkn/free-fast-youtube-url-video-to-text-using-openai-whisper/README.md
deleted file mode 100644
index ece5586a3ae3a4682cd9db3337a33250c589b479..0000000000000000000000000000000000000000
--- a/spaces/gryhkn/free-fast-youtube-url-video-to-text-using-openai-whisper/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Free Youtube URL Video-to-Text Using OpenAI Whisper
-emoji: 📚
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.11.0
-app_file: app.py
-pinned: false
-license: gpl-3.0
-duplicated_from: SteveDigital/free-fast-youtube-url-video-to-text-using-openai-whisper
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Chat/MemoizedChatMessage.tsx b/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Chat/MemoizedChatMessage.tsx
deleted file mode 100644
index 125d23d876450d5a49852f13d32a866f29dcc111..0000000000000000000000000000000000000000
--- a/spaces/gsaivinay/Llama-2-13B-GGML-UI/components/Chat/MemoizedChatMessage.tsx
+++ /dev/null
@@ -1,9 +0,0 @@
-import { FC, memo } from "react";
-import { ChatMessage, Props } from "./ChatMessage";
-
-export const MemoizedChatMessage: FC<Props> = memo(
-  ChatMessage,
-  (prevProps, nextProps) => (
-    prevProps.message.content === nextProps.message.content
-  )
-);
diff --git a/spaces/gyugnsu/DragGan-Inversion/PTI/training/projectors/w_plus_projector.py b/spaces/gyugnsu/DragGan-Inversion/PTI/training/projectors/w_plus_projector.py
deleted file mode 100644
index b9cce427e5374c5ddce90199e1184f84a13d30c5..0000000000000000000000000000000000000000
--- a/spaces/gyugnsu/DragGan-Inversion/PTI/training/projectors/w_plus_projector.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
- -"""Project given image to the latent space of pretrained network pickle.""" - -import copy -import wandb -import numpy as np -import torch -import torch.nn.functional as F -from tqdm import tqdm -from configs import global_config, hyperparameters -import dnnlib -from utils.log_utils import log_image_from_w - - -def project( - G, - target: torch.Tensor, # [C,H,W] and dynamic range [0,255], W & H must match G output resolution - *, - num_steps=1000, - w_avg_samples=10000, - initial_learning_rate=0.01, - initial_noise_factor=0.05, - lr_rampdown_length=0.25, - lr_rampup_length=0.05, - noise_ramp_length=0.75, - regularize_noise_weight=1e5, - verbose=False, - device: torch.device, - use_wandb=False, - initial_w=None, - image_log_step=global_config.image_rec_result_log_snapshot, - w_name: str -): - assert target.shape == (G.img_channels, G.img_resolution, G.img_resolution) - - def logprint(*args): - if verbose: - print(*args) - - G = copy.deepcopy(G).eval().requires_grad_(False).to(device).float() # type: ignore - - # Compute w stats. - logprint(f'Computing W midpoint and stddev using {w_avg_samples} samples...') - z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim) - w_samples = G.mapping(torch.from_numpy(z_samples).to(device), None) # [N, L, C] - w_samples = w_samples[:, :1, :].cpu().numpy().astype(np.float32) # [N, 1, C] - w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C] - w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device) - w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5 - - start_w = initial_w if initial_w is not None else w_avg - - # Setup noise inputs. - noise_bufs = {name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name} - - # Load VGG16 feature detector. - url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt' - with dnnlib.util.open_url(url) as f: - vgg16 = torch.jit.load(f).eval().to(device) - - # Features for target image. - target_images = target.unsqueeze(0).to(device).to(torch.float32) - if target_images.shape[2] > 256: - target_images = F.interpolate(target_images, size=(256, 256), mode='area') - target_features = vgg16(target_images, resize_images=False, return_lpips=True) - - start_w = np.repeat(start_w, G.mapping.num_ws, axis=1) - w_opt = torch.tensor(start_w, dtype=torch.float32, device=device, - requires_grad=True) # pylint: disable=not-callable - - optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), - lr=hyperparameters.first_inv_lr) - - # Init noise. - for buf in noise_bufs.values(): - buf[:] = torch.randn_like(buf) - buf.requires_grad = True - - for step in tqdm(range(num_steps)): - - # Learning rate schedule. - t = step / num_steps - w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2 - lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length) - lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi) - lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length) - lr = initial_learning_rate * lr_ramp - for param_group in optimizer.param_groups: - param_group['lr'] = lr - - # Synth images from opt_w. - w_noise = torch.randn_like(w_opt) * w_noise_scale - ws = (w_opt + w_noise) - - synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True) - - # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images. 
- synth_images = (synth_images + 1) * (255 / 2) - if synth_images.shape[2] > 256: - synth_images = F.interpolate(synth_images, size=(256, 256), mode='area') - - # Features for synth images. - synth_features = vgg16(synth_images, resize_images=False, return_lpips=True) - dist = (target_features - synth_features).square().sum() - - # Noise regularization. - reg_loss = 0.0 - for v in noise_bufs.values(): - noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d() - while True: - reg_loss += (noise * torch.roll(noise, shifts=1, dims=3)).mean() ** 2 - reg_loss += (noise * torch.roll(noise, shifts=1, dims=2)).mean() ** 2 - if noise.shape[2] <= 8: - break - noise = F.avg_pool2d(noise, kernel_size=2) - loss = dist + reg_loss * regularize_noise_weight - - if step % image_log_step == 0: - with torch.no_grad(): - if use_wandb: - global_config.training_step += 1 - wandb.log({f'first projection _{w_name}': loss.detach().cpu()}, step=global_config.training_step) - log_image_from_w(w_opt, G, w_name) - - # Step - optimizer.zero_grad(set_to_none=True) - loss.backward() - optimizer.step() - logprint(f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}') - - # Normalize noise. - with torch.no_grad(): - for buf in noise_bufs.values(): - buf -= buf.mean() - buf *= buf.square().mean().rsqrt() - - del G - return w_opt diff --git a/spaces/haakohu/deep_privacy2/dp2/detection/models/cse.py b/spaces/haakohu/deep_privacy2/dp2/detection/models/cse.py deleted file mode 100644 index 7b9fbf75fc8fd6c1905993316c03383b9e935564..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/dp2/detection/models/cse.py +++ /dev/null @@ -1,134 +0,0 @@ -import torch -from typing import List -import tops -from torchvision.transforms.functional import InterpolationMode, resize -from densepose.data.utils import get_class_to_mesh_name_mapping -from densepose import add_densepose_config -from densepose.structures import DensePoseEmbeddingPredictorOutput -from densepose.vis.extractor import DensePoseOutputsExtractor -from densepose.modeling import build_densepose_embedder -from detectron2.config import get_cfg -from detectron2.data.transforms import ResizeShortestEdge -from detectron2.checkpoint.detection_checkpoint import DetectionCheckpointer -from detectron2.modeling import build_model - - -model_urls = { - "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml": "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x/250713061/model_final_1d3314.pkl", - "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml": "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_s1x/251155172/model_final_c4ea5f.pkl", -} - - -def cse_det_to_global(boxes_XYXY, S: torch.Tensor, imshape): - assert len(S.shape) == 3 - H, W = imshape - N = len(boxes_XYXY) - segmentation = torch.zeros((N, H, W), dtype=torch.bool, device=S.device) - boxes_XYXY = boxes_XYXY.long() - for i in range(N): - x0, y0, x1, y1 = boxes_XYXY[i] - assert x0 >= 0 and y0 >= 0 - assert x1 <= imshape[1] - assert y1 <= imshape[0] - h = y1 - y0 - w = x1 - x0 - segmentation[i:i+1, y0:y1, x0:x1] = resize(S[i:i+1], (h, w), interpolation=InterpolationMode.NEAREST) > 0 - return segmentation - - -class CSEDetector: - - def __init__( - self, - cfg_url: str = 
"https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml", - cfg_2_download: List[str] = [ - "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml", - "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/cse/Base-DensePose-RCNN-FPN.yaml", - "https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/cse/Base-DensePose-RCNN-FPN-Human.yaml"], - score_thres: float = 0.9, - nms_thresh: float = None, - ) -> None: - with tops.logger.capture_log_stdout(): - cfg = get_cfg() - self.device = tops.get_device() - add_densepose_config(cfg) - cfg_path = tops.download_file(cfg_url) - for p in cfg_2_download: - tops.download_file(p) - with tops.logger.capture_log_stdout(): - cfg.merge_from_file(cfg_path) - assert cfg_url in model_urls, cfg_url - model_path = tops.download_file(model_urls[cfg_url]) - cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = score_thres - if nms_thresh is not None: - cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = nms_thresh - cfg.MODEL.WEIGHTS = str(model_path) - cfg.MODEL.DEVICE = str(self.device) - cfg.freeze() - with tops.logger.capture_log_stdout(): - self.model = build_model(cfg) - self.model.eval() - DetectionCheckpointer(self.model).load(str(model_path)) - self.input_format = cfg.INPUT.FORMAT - self.densepose_extractor = DensePoseOutputsExtractor() - self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg) - - self.embedder = build_densepose_embedder(cfg) - self.mesh_vertex_embeddings = { - mesh_name: self.embedder(mesh_name).to(self.device) - for mesh_name in self.class_to_mesh_name.values() - if self.embedder.has_embeddings(mesh_name) - } - self.cfg = cfg - self.embed_map = self.mesh_vertex_embeddings["smpl_27554"] - tops.logger.log("CSEDetector built.") - - def __call__(self, *args, **kwargs): - return self.forward(*args, **kwargs) - - def resize_im(self, im): - H, W = im.shape[1:] - newH, newW = ResizeShortestEdge.get_output_shape( - H, W, self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MAX_SIZE_TEST) - return resize( - im, (newH, newW), InterpolationMode.BILINEAR, antialias=True) - - @torch.no_grad() - def forward(self, im): - assert im.dtype == torch.uint8 - if self.input_format == "BGR": - im = im.flip(0) - H, W = im.shape[1:] - im = self.resize_im(im) - output = self.model([{"image": im, "height": H, "width": W}])[0]["instances"] - scores = output.get("scores") - if len(scores) == 0: - return dict( - instance_segmentation=torch.empty((0, 0, 112, 112), dtype=torch.bool, device=im.device), - instance_embedding=torch.empty((0, 16, 112, 112), dtype=torch.float32, device=im.device), - embed_map=self.mesh_vertex_embeddings["smpl_27554"], - bbox_XYXY=torch.empty((0, 4), dtype=torch.long, device=im.device), - im_segmentation=torch.empty((0, H, W), dtype=torch.bool, device=im.device), - scores=torch.empty((0), dtype=torch.float, device=im.device) - ) - pred_densepose, boxes_xywh, classes = self.densepose_extractor(output) - assert isinstance(pred_densepose, DensePoseEmbeddingPredictorOutput), pred_densepose - S = pred_densepose.coarse_segm.argmax(dim=1) # Segmentation channel Nx2xHxW (2 because only 2 classes) - E = pred_densepose.embedding - mesh_name = self.class_to_mesh_name[classes[0]] - assert mesh_name == "smpl_27554" - x0, y0, w, h = [boxes_xywh[:, i] for i in range(4)] - boxes_XYXY = torch.stack((x0, y0, x0+w, y0+h), dim=-1) - boxes_XYXY = 
boxes_XYXY.round_().long()
-
-        non_empty_boxes = (boxes_XYXY[:, :2] == boxes_XYXY[:, 2:]).any(dim=1).logical_not()
-        S = S[non_empty_boxes]
-        E = E[non_empty_boxes]
-        boxes_XYXY = boxes_XYXY[non_empty_boxes]
-        scores = scores[non_empty_boxes]
-        im_segmentation = cse_det_to_global(boxes_XYXY, S, [H, W])
-        return dict(
-            instance_segmentation=S, instance_embedding=E,
-            bbox_XYXY=boxes_XYXY,
-            im_segmentation=im_segmentation,
-            scores=scores.view(-1))
diff --git a/spaces/hahahafofo/vits-uma-genshin-honkai/monotonic_align/__init__.py b/spaces/hahahafofo/vits-uma-genshin-honkai/monotonic_align/__init__.py
deleted file mode 100644
index e97eecc595dd3bd97d0104ec62799e2e5efea57c..0000000000000000000000000000000000000000
--- a/spaces/hahahafofo/vits-uma-genshin-honkai/monotonic_align/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from numpy import zeros, int32, float32
-from torch import from_numpy
-
-from .core import maximum_path_jit
-
-
-def maximum_path(neg_cent, mask):
-    """ numba optimized version.
-    neg_cent: [b, t_t, t_s]
-    mask: [b, t_t, t_s]
-    """
-    device = neg_cent.device
-    dtype = neg_cent.dtype
-    neg_cent = neg_cent.data.cpu().numpy().astype(float32)
-    path = zeros(neg_cent.shape, dtype=int32)
-
-    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
-    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
-    maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
-    return from_numpy(path).to(device=device, dtype=dtype)
diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/backbone/resnet.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/backbone/resnet.py
deleted file mode 100644
index e0a0feec0de1fcbef2ef7c42ea805b87df2c2810..0000000000000000000000000000000000000000
--- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/backbone/resnet.py
+++ /dev/null
@@ -1,643 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-"""
-Variant of the resnet module that takes cfg as an argument.
-Example usage. Strings may be specified in the config file.
-    model = ResNet(
-        "StemWithFixedBatchNorm",
-        "BottleneckWithFixedBatchNorm",
-        "ResNet50StagesTo4",
-    )
-OR:
-    model = ResNet(
-        "StemWithGN",
-        "BottleneckWithGN",
-        "ResNet50StagesTo4",
-    )
-Custom implementations may be written in user code and hooked in via the
-`register_*` functions.
-"""
-from collections import namedtuple
-
-import torch
-import torch.nn.functional as F
-from torch import nn
-from torch.nn import BatchNorm2d, SyncBatchNorm
-
-from maskrcnn_benchmark.layers import FrozenBatchNorm2d, NaiveSyncBatchNorm2d
-from maskrcnn_benchmark.layers import Conv2d, DFConv2d, SELayer
-from maskrcnn_benchmark.modeling.make_layers import group_norm
-from maskrcnn_benchmark.utils.registry import Registry
-
-
-# ResNet stage specification
-StageSpec = namedtuple(
-    "StageSpec",
-    [
-        "index",  # Index of the stage, e.g. 1, 2, ..., 5
-        "block_count",  # Number of residual blocks in the stage
-        "return_features",  # True => return the last feature map from this stage
-    ],
-)
-
-# -----------------------------------------------------------------------------
-# Standard ResNet models
-# -----------------------------------------------------------------------------
-# ResNet-50 (including all stages)
-ResNet50StagesTo5 = tuple(
-    StageSpec(index=i, block_count=c, return_features=r)
-    for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, False), (4, 3, True))
-)
-# ResNet-50 up to stage 4 (excludes stage 5)
-ResNet50StagesTo4 = tuple(
-    StageSpec(index=i, block_count=c, return_features=r)
-    for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, True))
-)
-# ResNet-101 (including all stages)
-ResNet101StagesTo5 = tuple(
-    StageSpec(index=i, block_count=c, return_features=r)
-    for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, False), (4, 3, True))
-)
-# ResNet-101 up to stage 4 (excludes stage 5)
-ResNet101StagesTo4 = tuple(
-    StageSpec(index=i, block_count=c, return_features=r)
-    for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, True))
-)
-# ResNet-50-FPN (including all stages)
-ResNet50FPNStagesTo5 = tuple(
-    StageSpec(index=i, block_count=c, return_features=r)
-    for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 6, True), (4, 3, True))
-)
-# ResNet-101-FPN (including all stages)
-ResNet101FPNStagesTo5 = tuple(
-    StageSpec(index=i, block_count=c, return_features=r)
-    for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 23, True), (4, 3, True))
-)
-# ResNet-152-FPN (including all stages)
-ResNet152FPNStagesTo5 = tuple(
-    StageSpec(index=i, block_count=c, return_features=r)
-    for (i, c, r) in ((1, 3, True), (2, 8, True), (3, 36, True), (4, 3, True))
-)
-
-class ResNet(nn.Module):
-    def __init__(self, cfg):
-        super(ResNet, self).__init__()
-
-        # If we want to use the cfg in forward(), then we should make a copy
-        # of it and store it for later use:
-        # self.cfg = cfg.clone()
-
-        # Translate string names to implementations
-        norm_level = None
-        stem_module = _STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC]
-        stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY]
-        transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC]
-
-        if cfg.MODEL.BACKBONE.USE_BN:
-            stem_module = StemWithBatchNorm
-            transformation_module = BottleneckWithBatchNorm
-            norm_level = cfg.MODEL.BACKBONE.NORM_LEVEL
-        elif cfg.MODEL.BACKBONE.USE_NSYNCBN:
-            stem_module = StemWithNaiveSyncBatchNorm
-            transformation_module = BottleneckWithNaiveSyncBatchNorm
-            norm_level = cfg.MODEL.BACKBONE.NORM_LEVEL
-        elif cfg.MODEL.BACKBONE.USE_SYNCBN:
-            stem_module = StemWithSyncBatchNorm
-            transformation_module = BottleneckWithSyncBatchNorm
-            norm_level = cfg.MODEL.BACKBONE.NORM_LEVEL
-
-        # Construct the stem module
-        self.stem = stem_module(cfg)
-
-        # Construct the specified ResNet stages
-        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
-        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
-        in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
-        stage2_bottleneck_channels = num_groups * width_per_group
-        stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
-        with_se = cfg.MODEL.RESNETS.WITH_SE
-
-        self.stages = []
-        self.out_channels = []
-        self.return_features = {}
-        for stage_spec in stage_specs:
-            name = "layer" + str(stage_spec.index)
-            stage2_relative_factor = 2 ** (stage_spec.index - 1)
-            bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor
-            out_channels = stage2_out_channels * stage2_relative_factor
-            stage_with_dcn =
cfg.MODEL.RESNETS.STAGE_WITH_DCN[stage_spec.index - 1] - if cfg.MODEL.RESNETS.USE_AVG_DOWN: - avg_down_stride = 1 if stage_spec.index==1 else 2 - else: - avg_down_stride = 0 - module = _make_stage( - transformation_module, - in_channels, - bottleneck_channels, - out_channels, - stage_spec.block_count, - num_groups, - cfg.MODEL.RESNETS.STRIDE_IN_1X1, - first_stride=int(stage_spec.index > 1) + 1, - dcn_config={ - "stage_with_dcn": stage_with_dcn, - "with_modulated_dcn": cfg.MODEL.RESNETS.WITH_MODULATED_DCN, - "deformable_groups": cfg.MODEL.RESNETS.DEFORMABLE_GROUPS, - }, - norm_level=norm_level, - with_se=with_se, - avg_down_stride=avg_down_stride - ) - in_channels = out_channels - self.add_module(name, module) - self.stages.append(name) - self.out_channels.append(out_channels) - self.return_features[name] = stage_spec.return_features - - # Optionally freeze (requires_grad=False) parts of the backbone - self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) - - def _freeze_backbone(self, freeze_at): - if freeze_at < 0: - return - for stage_index in range(freeze_at): - if stage_index == 0: - m = self.stem # stage 0 is the stem - else: - m = getattr(self, "layer" + str(stage_index)) - for p in m.parameters(): - p.requires_grad = False - - def forward(self, x): - outputs = [] - x = self.stem(x) - for stage_name in self.stages: - x = getattr(self, stage_name)(x) - if self.return_features[stage_name]: - outputs.append(x) - return outputs - - -class ResNetHead(nn.Module): - def __init__( - self, - block_module, - stages, - num_groups=1, - width_per_group=64, - stride_in_1x1=True, - stride_init=None, - res2_out_channels=256, - dilation=1, - dcn_config=None - ): - super(ResNetHead, self).__init__() - - stage2_relative_factor = 2 ** (stages[0].index - 1) - stage2_bottleneck_channels = num_groups * width_per_group - out_channels = res2_out_channels * stage2_relative_factor - in_channels = out_channels // 2 - bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor - - block_module = _TRANSFORMATION_MODULES[block_module] - - self.stages = [] - stride = stride_init - for stage in stages: - name = "layer" + str(stage.index) - if not stride: - stride = int(stage.index > 1) + 1 - module = _make_stage( - block_module, - in_channels, - bottleneck_channels, - out_channels, - stage.block_count, - num_groups, - stride_in_1x1, - first_stride=stride, - dilation=dilation, - dcn_config=dcn_config - ) - stride = None - self.add_module(name, module) - self.stages.append(name) - self.out_channels = out_channels - - def forward(self, x): - for stage in self.stages: - x = getattr(self, stage)(x) - return x - - -def _make_stage( - transformation_module, - in_channels, - bottleneck_channels, - out_channels, - block_count, - num_groups, - stride_in_1x1, - first_stride, - dilation=1, - dcn_config=None, - norm_level=None, - **kwargs -): - blocks = [] - stride = first_stride - for li in range(block_count): - if norm_level is not None: - layer_module = BottleneckWithFixedBatchNorm - if norm_level >= 1 and li == 0: - layer_module = transformation_module - if norm_level >= 2 and li == block_count - 1: - layer_module = transformation_module - if norm_level >= 3: - layer_module = transformation_module - else: - layer_module = transformation_module - - blocks.append( - layer_module( - in_channels, - bottleneck_channels, - out_channels, - num_groups, - stride_in_1x1, - stride, - dilation=dilation, - dcn_config=dcn_config, - **kwargs - ) - ) - stride = 1 - in_channels = out_channels - return 
nn.Sequential(*blocks) - - -class Bottleneck(nn.Module): - def __init__( - self, - in_channels, - bottleneck_channels, - out_channels, - num_groups, - stride_in_1x1, - stride, - dilation, - norm_func, - dcn_config, - with_se=False, - avg_down_stride=0, - ): - super(Bottleneck, self).__init__() - - self.downsample = None - if in_channels != out_channels: - down_stride = stride if dilation == 1 else 1 - if avg_down_stride>0: - self.downsample = nn.Sequential( - nn.AvgPool2d( - kernel_size=avg_down_stride, - stride=avg_down_stride, - ceil_mode=True, - count_include_pad=False - ), - nn.Conv2d( - in_channels, out_channels, - kernel_size=1, stride=1, bias=False - ), - norm_func(out_channels), - ) - else: - self.downsample = nn.Sequential( - Conv2d( - in_channels, out_channels, - kernel_size=1, stride=down_stride, bias=False - ), - norm_func(out_channels), - ) - for modules in [self.downsample,]: - for l in modules.modules(): - if isinstance(l, Conv2d): - nn.init.kaiming_uniform_(l.weight, a=1) - - if dilation > 1: - stride = 1 # reset to be 1 - - # The original MSRA ResNet models have stride in the first 1x1 conv - # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have - # stride in the 3x3 conv - stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) - - self.conv1 = Conv2d( - in_channels, - bottleneck_channels, - kernel_size=1, - stride=stride_1x1, - bias=False, - ) - self.bn1 = norm_func(bottleneck_channels) - # TODO: specify init for the above - with_dcn = dcn_config.get("stage_with_dcn", False) - if with_dcn: - deformable_groups = dcn_config.get("deformable_groups", 1) - with_modulated_dcn = dcn_config.get("with_modulated_dcn", False) - self.conv2 = DFConv2d( - bottleneck_channels, - bottleneck_channels, - with_modulated_dcn=with_modulated_dcn, - kernel_size=3, - stride=stride_3x3, - groups=num_groups, - dilation=dilation, - deformable_groups=deformable_groups, - bias=False - ) - else: - self.conv2 = Conv2d( - bottleneck_channels, - bottleneck_channels, - kernel_size=3, - stride=stride_3x3, - padding=dilation, - bias=False, - groups=num_groups, - dilation=dilation - ) - nn.init.kaiming_uniform_(self.conv2.weight, a=1) - - self.bn2 = norm_func(bottleneck_channels) - - self.conv3 = Conv2d( - bottleneck_channels, out_channels, kernel_size=1, bias=False - ) - self.bn3 = norm_func(out_channels) - - self.se = SELayer(out_channels) if with_se and not with_dcn else None - - for l in [self.conv1, self.conv3,]: - nn.init.kaiming_uniform_(l.weight, a=1) - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = F.relu_(out) - - out = self.conv2(out) - out = self.bn2(out) - out = F.relu_(out) - - out0 = self.conv3(out) - out = self.bn3(out0) - - if self.se: - out = self.se(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = F.relu_(out) - - return out - - -class BaseStem(nn.Module): - def __init__(self, cfg, norm_func): - super(BaseStem, self).__init__() - - out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS - self.stem_3x3 = cfg.MODEL.RESNETS.USE_STEM3X3 - - if self.stem_3x3: - self.conv1 = Conv2d( - 3, out_channels, kernel_size=3, stride=2, padding=1, bias=False - ) - self.bn1 = norm_func(out_channels) - self.conv2 = Conv2d( - out_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False - ) - self.bn2 = norm_func(out_channels) - for l in [self.conv1, self.conv2]: - nn.init.kaiming_uniform_(l.weight, a=1) - else: - self.conv1 = Conv2d( - 3, out_channels, kernel_size=7, 
stride=2, padding=3, bias=False - ) - self.bn1 = norm_func(out_channels) - - for l in [self.conv1,]: - nn.init.kaiming_uniform_(l.weight, a=1) - - def forward(self, x): - if self.stem_3x3: - x = self.conv1(x) - x = self.bn1(x) - x = F.relu_(x) - x = self.conv2(x) - x = self.bn2(x) - x = F.relu_(x) - else: - x = self.conv1(x) - x = self.bn1(x) - x = F.relu_(x) - x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) - return x - - -class BottleneckWithFixedBatchNorm(Bottleneck): - def __init__( - self, - in_channels, - bottleneck_channels, - out_channels, - num_groups=1, - stride_in_1x1=True, - stride=1, - dilation=1, - dcn_config=None, - **kwargs - ): - super(BottleneckWithFixedBatchNorm, self).__init__( - in_channels=in_channels, - bottleneck_channels=bottleneck_channels, - out_channels=out_channels, - num_groups=num_groups, - stride_in_1x1=stride_in_1x1, - stride=stride, - dilation=dilation, - norm_func=FrozenBatchNorm2d, - dcn_config=dcn_config, - **kwargs - ) - - -class StemWithFixedBatchNorm(BaseStem): - def __init__(self, cfg): - super(StemWithFixedBatchNorm, self).__init__( - cfg, norm_func=FrozenBatchNorm2d - ) - - -class BottleneckWithBatchNorm(Bottleneck): - def __init__( - self, - in_channels, - bottleneck_channels, - out_channels, - num_groups=1, - stride_in_1x1=True, - stride=1, - dilation=1, - dcn_config=None, - **kwargs - ): - super(BottleneckWithBatchNorm, self).__init__( - in_channels=in_channels, - bottleneck_channels=bottleneck_channels, - out_channels=out_channels, - num_groups=num_groups, - stride_in_1x1=stride_in_1x1, - stride=stride, - dilation=dilation, - norm_func=BatchNorm2d, - dcn_config=dcn_config, - **kwargs - ) - - -class StemWithBatchNorm(BaseStem): - def __init__(self, cfg): - super(StemWithBatchNorm, self).__init__( - cfg, norm_func=BatchNorm2d - ) - - -class BottleneckWithNaiveSyncBatchNorm(Bottleneck): - def __init__( - self, - in_channels, - bottleneck_channels, - out_channels, - num_groups=1, - stride_in_1x1=True, - stride=1, - dilation=1, - dcn_config=None, - **kwargs - ): - super(BottleneckWithNaiveSyncBatchNorm, self).__init__( - in_channels=in_channels, - bottleneck_channels=bottleneck_channels, - out_channels=out_channels, - num_groups=num_groups, - stride_in_1x1=stride_in_1x1, - stride=stride, - dilation=dilation, - norm_func=NaiveSyncBatchNorm2d, - dcn_config=dcn_config, - **kwargs - ) - - -class StemWithNaiveSyncBatchNorm(BaseStem): - def __init__(self, cfg): - super(StemWithNaiveSyncBatchNorm, self).__init__( - cfg, norm_func=NaiveSyncBatchNorm2d - ) - - -class BottleneckWithSyncBatchNorm(Bottleneck): - def __init__( - self, - in_channels, - bottleneck_channels, - out_channels, - num_groups=1, - stride_in_1x1=True, - stride=1, - dilation=1, - dcn_config=None, - **kwargs - ): - super(BottleneckWithSyncBatchNorm, self).__init__( - in_channels=in_channels, - bottleneck_channels=bottleneck_channels, - out_channels=out_channels, - num_groups=num_groups, - stride_in_1x1=stride_in_1x1, - stride=stride, - dilation=dilation, - norm_func=SyncBatchNorm, - dcn_config=dcn_config, - **kwargs - ) - - -class StemWithSyncBatchNorm(BaseStem): - def __init__(self, cfg): - super(StemWithSyncBatchNorm, self).__init__( - cfg, norm_func=SyncBatchNorm - ) - - -class BottleneckWithGN(Bottleneck): - def __init__( - self, - in_channels, - bottleneck_channels, - out_channels, - num_groups=1, - stride_in_1x1=True, - stride=1, - dilation=1, - dcn_config=None, - **kwargs - ): - super(BottleneckWithGN, self).__init__( - in_channels=in_channels, - 
bottleneck_channels=bottleneck_channels, - out_channels=out_channels, - num_groups=num_groups, - stride_in_1x1=stride_in_1x1, - stride=stride, - dilation=dilation, - norm_func=group_norm, - dcn_config=dcn_config, - **kwargs - ) - - -class StemWithGN(BaseStem): - def __init__(self, cfg): - super(StemWithGN, self).__init__(cfg, norm_func=group_norm) - - -_TRANSFORMATION_MODULES = Registry({ - "BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm, - "BottleneckWithGN": BottleneckWithGN, -}) - -_STEM_MODULES = Registry({ - "StemWithFixedBatchNorm": StemWithFixedBatchNorm, - "StemWithGN": StemWithGN, -}) - -_STAGE_SPECS = Registry({ - "R-50-C4": ResNet50StagesTo4, - "R-50-C5": ResNet50StagesTo5, - "R-50-RETINANET": ResNet50StagesTo5, - "R-101-C4": ResNet101StagesTo4, - "R-101-C5": ResNet101StagesTo5, - "R-101-RETINANET": ResNet101StagesTo5, - "R-50-FPN": ResNet50FPNStagesTo5, - "R-50-FPN-RETINANET": ResNet50FPNStagesTo5, - "R-50-FPN-FCOS": ResNet50FPNStagesTo5, - "R-101-FPN": ResNet101FPNStagesTo5, - "R-101-FPN-RETINANET": ResNet101FPNStagesTo5, - "R-101-FPN-FCOS": ResNet101FPNStagesTo5, - "R-152-FPN": ResNet152FPNStagesTo5, -}) \ No newline at end of file diff --git a/spaces/heiyuan/ChatGPT/llama_func.py b/spaces/heiyuan/ChatGPT/llama_func.py deleted file mode 100644 index c71027dd4e6f99c0c12626cbbf276f407877be04..0000000000000000000000000000000000000000 --- a/spaces/heiyuan/ChatGPT/llama_func.py +++ /dev/null @@ -1,192 +0,0 @@ -import os -import logging - -from llama_index import GPTSimpleVectorIndex -from llama_index import download_loader -from llama_index import ( - Document, - LLMPredictor, - PromptHelper, - QuestionAnswerPrompt, - RefinePrompt, -) -from langchain.llms import OpenAI -import colorama - - -from presets import * -from utils import * - - -def get_documents(file_src): - documents = [] - index_name = "" - logging.debug("Loading documents...") - logging.debug(f"file_src: {file_src}") - for file in file_src: - logging.debug(f"file: {file.name}") - index_name += file.name - if os.path.splitext(file.name)[1] == ".pdf": - logging.debug("Loading PDF...") - CJKPDFReader = download_loader("CJKPDFReader") - loader = CJKPDFReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".docx": - logging.debug("Loading DOCX...") - DocxReader = download_loader("DocxReader") - loader = DocxReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".epub": - logging.debug("Loading EPUB...") - EpubReader = download_loader("EpubReader") - loader = EpubReader() - documents += loader.load_data(file=file.name) - else: - logging.debug("Loading text file...") - with open(file.name, "r", encoding="utf-8") as f: - text = add_space(f.read()) - documents += [Document(text)] - index_name = sha1sum(index_name) - return documents, index_name - - -def construct_index( - api_key, - file_src, - max_input_size=4096, - num_outputs=1, - max_chunk_overlap=20, - chunk_size_limit=600, - embedding_limit=None, - separator=" ", - num_children=10, - max_keywords_per_chunk=10, -): - os.environ["OPENAI_API_KEY"] = api_key - chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit - embedding_limit = None if embedding_limit == 0 else embedding_limit - separator = " " if separator == "" else separator - - llm_predictor = LLMPredictor( - llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key) - ) - prompt_helper = PromptHelper( - max_input_size, - num_outputs, - max_chunk_overlap, - embedding_limit, - 
chunk_size_limit,
-        separator=separator,
-    )
-    documents, index_name = get_documents(file_src)
-    if os.path.exists(f"./index/{index_name}.json"):
-        logging.info("Found a cached index file, loading it...")
-        return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json")
-    else:
-        try:
-            logging.debug("Building the index...")
-            index = GPTSimpleVectorIndex(
-                documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
-            )
-            os.makedirs("./index", exist_ok=True)
-            index.save_to_disk(f"./index/{index_name}.json")
-            return index
-        except Exception as e:
-            print(e)
-            return None
-
-
-def chat_ai(
-    api_key,
-    index,
-    question,
-    context,
-    chatbot,
-):
-    os.environ["OPENAI_API_KEY"] = api_key
-
-    logging.info(f"Question: {question}")
-
-    response, chatbot_display, status_text = ask_ai(
-        api_key,
-        index,
-        question,
-        replace_today(PROMPT_TEMPLATE),
-        REFINE_TEMPLATE,
-        SIM_K,
-        INDEX_QUERY_TEMPRATURE,
-        context,
-    )
-    if response is None:
-        status_text = "The query failed; please try rephrasing the question."
-        return context, chatbot, status_text
-
-    context.append({"role": "user", "content": question})
-    context.append({"role": "assistant", "content": response})
-    chatbot.append((question, chatbot_display))
-
-    os.environ["OPENAI_API_KEY"] = ""
-    return context, chatbot, status_text
-
-
-def ask_ai(
-    api_key,
-    index,
-    question,
-    prompt_tmpl,
-    refine_tmpl,
-    sim_k=1,
-    temprature=0,
-    prefix_messages=[],
-):
-    os.environ["OPENAI_API_KEY"] = api_key
-
-    logging.debug("Index file found")
-    logging.debug("Querying index...")
-    llm_predictor = LLMPredictor(
-        llm=OpenAI(
-            temperature=temprature,
-            model_name="gpt-3.5-turbo-0301",
-            prefix_messages=prefix_messages,
-        )
-    )
-
-    response = None  # Initialize response variable to avoid UnboundLocalError
-    qa_prompt = QuestionAnswerPrompt(prompt_tmpl)
-    rf_prompt = RefinePrompt(refine_tmpl)
-    response = index.query(
-        question,
-        llm_predictor=llm_predictor,
-        similarity_top_k=sim_k,
-        text_qa_template=qa_prompt,
-        refine_template=rf_prompt,
-        response_mode="compact",
-    )
-
-    if response is not None:
-        logging.info(f"Response: {response}")
-        ret_text = response.response
-        nodes = []
-        for index, node in enumerate(response.source_nodes):
-            brief = node.source_text[:25].replace("\n", "")
-            nodes.append(
-                f"<details><summary>[{index+1}]\t{brief}...</summary>\n\n{node.source_text}\n\n</details>"
-            )
              " - ) - new_response = ret_text + "\n----------\n" + "\n\n".join(nodes) - logging.info( - f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}" - ) - os.environ["OPENAI_API_KEY"] = "" - return ret_text, new_response, f"查询消耗了{llm_predictor.last_token_usage} tokens" - else: - logging.warning("No response found, returning None") - os.environ["OPENAI_API_KEY"] = "" - return None - - -def add_space(text): - punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "} - for cn_punc, en_punc in punctuations.items(): - text = text.replace(cn_punc, en_punc) - return text diff --git a/spaces/hekbobo/bingo/src/lib/hooks/chat-history.ts b/spaces/hekbobo/bingo/src/lib/hooks/chat-history.ts deleted file mode 100644 index c6fbf3fecfa86fe553f56acc8253236b8f22a775..0000000000000000000000000000000000000000 --- a/spaces/hekbobo/bingo/src/lib/hooks/chat-history.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { zip } from 'lodash-es' -import { ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { Storage } from '../storage' - -/** - * conversations:$botId => Conversation[] - * conversation:$botId:$cid:messages => ChatMessageModel[] - */ - -interface Conversation { - id: string - createdAt: number -} - -type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] } - -async function loadHistoryConversations(botId: BotId): Promise { - const key = `conversations:${botId}` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -async function deleteHistoryConversation(botId: BotId, cid: string) { - const conversations = await loadHistoryConversations(botId) - const newConversations = conversations.filter((c) => c.id !== cid) - await Storage.set({ [`conversations:${botId}`]: newConversations }) -} - -async function loadConversationMessages(botId: BotId, cid: string): Promise { - const key = `conversation:${botId}:${cid}:messages` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) { - const conversations = await loadHistoryConversations(botId) - if (!conversations.some((c) => c.id === cid)) { - conversations.unshift({ id: cid, createdAt: Date.now() }) - await Storage.set({ [`conversations:${botId}`]: conversations }) - } - const key = `conversation:${botId}:${cid}:messages` - await Storage.set({ [key]: messages }) -} - -export async function loadHistoryMessages(botId: BotId): Promise { - const conversations = await loadHistoryConversations(botId) - const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id))) - return zip(conversations, messagesList).map(([c, messages]) => ({ - id: c!.id, - createdAt: c!.createdAt, - messages: messages!, - })) -} - -export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) { - const messages = await loadConversationMessages(botId, conversationId) - const newMessages = messages.filter((m) => m.id !== messageId) - await setConversationMessages(botId, conversationId, newMessages) - if (!newMessages.length) { - await deleteHistoryConversation(botId, conversationId) - } -} diff --git a/spaces/hhhyrhe/vits-uma-genshin-honkai/text/symbols.py b/spaces/hhhyrhe/vits-uma-genshin-honkai/text/symbols.py deleted file mode 100644 index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000 --- a/spaces/hhhyrhe/vits-uma-genshin-honkai/text/symbols.py +++ /dev/null @@ 
-1,39 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. -''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' - - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") \ No newline at end of file diff --git a/spaces/huaiji3y/bingo-Public/src/pages/api/sydney.ts b/spaces/huaiji3y/bingo-Public/src/pages/api/sydney.ts deleted file mode 100644 index 8bd7074bc72bd2803e4acf89d3814908893ff044..0000000000000000000000000000000000000000 --- a/spaces/huaiji3y/bingo-Public/src/pages/api/sydney.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next' -import { WebSocket, debug } from '@/lib/isomorphic' -import { BingWebBot } from '@/lib/bots/bing' -import { websocketUtils } from '@/lib/bots/bing/utils' -import { WatchDog, createHeaders } from '@/lib/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const conversationContext = req.body - const headers = createHeaders(req.cookies) - const id = headers['x-forwarded-for'] - - debug(id, headers) - res.setHeader('Content-Type', 'text/stream; charset=UTF-8') - - const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', { - headers: { - ...headers, - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - pragma: 'no-cache', - } - }) - - const closeDog = new WatchDog() - const timeoutDog = new WatchDog() - ws.onmessage = (event) => { - timeoutDog.watch(() => { - debug(id, 'timeout') - ws.send(websocketUtils.packMessage({ type: 6 })) - }, 3000) - closeDog.watch(() => { - debug(id, 'timeout close') - ws.close() - }, 20000) - res.write(event.data) - if (/\{"type":([367])\}/.test(String(event.data))) { - const type = parseInt(RegExp.$1, 10) - debug(id, 'connection type', type) - if (type === 3) { - ws.close() - } else { - ws.send(websocketUtils.packMessage({ type })) - } - } - } - - ws.onclose = () => { - timeoutDog.reset() - closeDog.reset() - debug(id, 'ws close') - res.end() - } - - await new Promise((resolve) => ws.onopen = resolve) - ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 })) - ws.send(websocketUtils.packMessage({ type: 6 })) - ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!))) - req.socket.once('close', () => { - debug(id, 'connection close') - ws.close() - if (!res.closed) { - res.end() - } - }) -} diff --git a/spaces/hudsonhayes/Vodafone_CRM_Chatbot/app.py b/spaces/hudsonhayes/Vodafone_CRM_Chatbot/app.py deleted file mode 100644 index 885960e04eae215a2c2455594b54b842b72656a9..0000000000000000000000000000000000000000 --- a/spaces/hudsonhayes/Vodafone_CRM_Chatbot/app.py +++ /dev/null @@ -1,296 +0,0 @@ -from pydantic import NoneStr -import os -import mimetypes -import requests -import tempfile -import gradio as gr -import openai -import re -import json -from 
transformers import pipeline -import matplotlib.pyplot as plt -import plotly.express as px - -class SentimentAnalyzer: - def __init__(self): - self.model="facebook/bart-large-mnli" - def analyze_sentiment(self, text): - pipe = pipeline("zero-shot-classification", model=self.model) - label=["positive","negative","neutral"] - result = pipe(text, label) - sentiment_scores= {result['labels'][0]:result['scores'][0],result['labels'][1]:result['scores'][1],result['labels'][2]:result['scores'][2]} - sentiment_scores_str = f"Positive: {sentiment_scores['positive']:.2f}, Neutral: {sentiment_scores['neutral']:.2f}, Negative: {sentiment_scores['negative']:.2f}" - return sentiment_scores_str - def emotion_analysis(self,text): - prompt = f""" Your task is to analyze {text} and predict the emotion using scores. Emotions are categorized into the following list: Sadness, Happiness, Joy, Fear, Disgust, and Anger. You need to provide the emotion with the highest score. The scores should be in the range of 0.0 to 1.0, where 1.0 represents the highest intensity of the emotion. -Please analyze the text and provide the output in the following format: emotion: score [with one result having the highest score].""" - response = openai.Completion.create( - model="text-davinci-003", - prompt=prompt, - temperature=1, - max_tokens=60, - top_p=1, - frequency_penalty=0, - presence_penalty=0 - ) - - message = response.choices[0].text.strip().replace("\n","") - print(message) - return message - - def analyze_sentiment_for_graph(self, text): - pipe = pipeline("zero-shot-classification", model=self.model) - label=["positive", "negative", "neutral"] - result = pipe(text, label) - sentiment_scores = { - result['labels'][0]: result['scores'][0], - result['labels'][1]: result['scores'][1], - result['labels'][2]: result['scores'][2] - } - return sentiment_scores - - def emotion_analysis_for_graph(self,text): - - list_of_emotion=text.split(":") - label=list_of_emotion[0] - score=list_of_emotion[1] - score_dict={ - label:float(score) - } - print(score_dict) - return score_dict - - -class Summarizer: - def __init__(self): - pass - - def generate_summary(self, text): - model_engine = "text-davinci-003" - prompt = f"""summarize the following conversation delimited by triple backticks. - write within 30 words. 
- ```{text}``` """ - completions = openai.Completion.create( - engine=model_engine, - prompt=prompt, - max_tokens=60, - n=1, - stop=None, - temperature=0.5, - ) - message = completions.choices[0].text.strip() - return message - -history_state = gr.State() -summarizer = Summarizer() -sentiment = SentimentAnalyzer() - -class LangChain_Document_QA: - - def __init__(self): - pass - - def _add_text(self,history, text): - history = history + [(text, None)] - history_state.value = history - return history,gr.update(value="", interactive=False) - - def _agent_text(self,history, text): - response = text - history[-1][1] = response - history_state.value = history - return history - - def _chat_history(self): - history = history_state.value - formatted_history = " " - for entry in history: - customer_text, agent_text = entry - formatted_history += f"Customer: {customer_text}\n" - if agent_text: - formatted_history += f"Agent: {agent_text}\n" - return formatted_history - - def _display_history(self): - formatted_history=self._chat_history() - summary=summarizer.generate_summary(formatted_history) - return summary - - def _display_graph(self,sentiment_scores): - labels = sentiment_scores.keys() - scores = sentiment_scores.values() - fig = px.bar(x=scores, y=labels, orientation='h', color=labels, color_discrete_map={"Negative": "red", "Positive": "green", "Neutral": "gray"}) - fig.update_traces(texttemplate='%{x:.2f}%', textposition='outside') - fig.update_layout(height=500, width=200) - return fig - - def _history_of_chat(self): - history = history_state.value - formatted_history = "" - client="" - agent="" - for entry in history: - customer_text, agent_text = entry - client+=customer_text - formatted_history += f"Customer: {customer_text}\n" - if agent_text: - agent+=agent_text - formatted_history += f"Agent: {agent_text}\n" - return client,agent - - - def _suggested_answer(self,text): - try: - history = self._chat_history() - start_sequence = "\nCustomer:" - restart_sequence = "\nVodafone Customer Relationship Manager:" - prompt = 'Your task is to hold a conversation between a customer and a Vodafone telecom customer relationship manager.' - file_path = "vodafone_customer_details.json" - with open(file_path) as file: - customer_details = json.load(file) - prompt = f"""{history}{start_sequence}{text}{restart_sequence} If the customer asks for any information, take it from {customer_details}. - If the customer says thanks, thank you, or a similar closing message, do not ask anything further and end the conversation with a friendly greeting.
- """ - response = openai.Completion.create( - model="text-davinci-003", - prompt=prompt, - temperature=0, - max_tokens=500, - top_p=1, - frequency_penalty=0, - presence_penalty=0.6, - ) - - message = response.choices[0].text.strip() - if ":" in message: - message = re.sub(r'^.*:', '', message) - return message.strip() - except: - return "I can't get the response" - - - - def _text_box(self,customer_emotion,agent_emotion,agent_sentiment_score,customer_sentiment_score): - agent_score = ", ".join([f"{key}: {value:.2f}" for key, value in agent_sentiment_score.items()]) - customer_score = ", ".join([f"{key}: {value:.2f}" for key, value in customer_sentiment_score.items()]) - return f"customer_emotion:{customer_emotion}\nagent_emotion:{agent_emotion}\nAgent_Sentiment_score:{agent_score}\nCustomer_sentiment_score:{customer_score}" - - def _on_sentiment_btn_click(self): - client,agent=self._history_of_chat() - - customer_emotion=sentiment.emotion_analysis(client) - customer_sentiment_score = sentiment.analyze_sentiment_for_graph(client) - - agent_emotion=sentiment.emotion_analysis(agent) - agent_sentiment_score = sentiment.analyze_sentiment_for_graph(agent) - - scores=self._text_box(customer_emotion,agent_emotion,agent_sentiment_score,customer_sentiment_score) - - customer_fig=self._display_graph(customer_sentiment_score) - customer_fig.update_layout(title="Sentiment Analysis",width=800) - - agent_fig=self._display_graph(agent_sentiment_score) - agent_fig.update_layout(title="Sentiment Analysis",width=800) - - agent_emotion_score = sentiment.emotion_analysis_for_graph(agent_emotion) - - agent_emotion_fig=self._display_graph(agent_emotion_score) - agent_emotion_fig.update_layout(title="Emotion Analysis",width=800) - - customer_emotion_score = sentiment.emotion_analysis_for_graph(customer_emotion) - - customer_emotion_fig=self._display_graph(customer_emotion_score) - customer_emotion_fig.update_layout(title="Emotion Analysis",width=800) - - return scores,customer_fig,agent_fig,customer_emotion_fig,agent_emotion_fig - - def clear_func(self): - history_state.clear() - def gradio_interface(self): - with gr.Blocks(css="style.css",theme=gr.themes.Soft()) as demo: - with gr.Row(): - gr.HTML("""Image - Image""") - - with gr.Row(): - gr.HTML("""

              Vodafone Generative AI CRM ChatBot

              """) - chatbot = gr.Chatbot([], elem_id="chatbot").style(height=300) - with gr.Row(): - with gr.Column(scale=0.50): - txt = gr.Textbox( - show_label=False, - placeholder="Customer", - ).style(container=False) - with gr.Column(scale=0.50): - txt2 = gr.Textbox( - show_label=False, - placeholder="Agent", - ).style(container=False) - - with gr.Column(scale=0.40): - txt3 =gr.Textbox( - show_label=False, - placeholder="GPT_Suggestion", - ).style(container=False) - with gr.Column(scale=0.10, min_width=0): - button=gr.Button( - value="🚀" - ) - with gr.Column(scale=0.10, min_width=0): - clear_btn=gr.Button( - value="Clear" - ) - with gr.Row(): - with gr.Column(scale=0.40): - txt4 =gr.Textbox( - show_label=False, - lines=4, - placeholder="Summary", - ).style(container=False) - with gr.Column(scale=0.10, min_width=0): - end_btn=gr.Button( - value="End" - ) - with gr.Column(scale=0.40): - txt5 =gr.Textbox( - show_label=False, - lines=4, - placeholder="Sentiment", - ).style(container=False) - - with gr.Column(scale=0.10, min_width=0): - Sentiment_btn=gr.Button( - value="📊",callback=self._on_sentiment_btn_click - ) - with gr.Row(): - gr.HTML("""

              Sentiment and Emotion Score Graph

              """) - with gr.Row(): - with gr.Column(scale=0.70, min_width=0): - plot =gr.Plot(label="Customer", size=(500, 600)) - with gr.Row(): - with gr.Column(scale=0.70, min_width=0): - plot_2 =gr.Plot(label="Agent", size=(500, 600)) - with gr.Row(): - with gr.Column(scale=0.70, min_width=0): - plot_3 =gr.Plot(label="Customer_Emotion", size=(500, 600)) - with gr.Row(): - with gr.Column(scale=0.70, min_width=0): - plot_4 =gr.Plot(label="Agent_Emotion", size=(500, 600)) - - - txt_msg = txt.submit(self._add_text, [chatbot, txt], [chatbot, txt]) - txt_msg.then(lambda: gr.update(interactive=True), None, [txt]) - txt.submit(self._suggested_answer,txt,txt3) - button.click(self._agent_text, [chatbot,txt3], chatbot) - txt2.submit(self._agent_text, [chatbot, txt2], chatbot).then( - self._agent_text, [chatbot, txt2], chatbot - ) - end_btn.click(self._display_history, [], txt4) - clear_btn.click(self.clear_func,[],[]) - clear_btn.click(lambda: None, None, chatbot, queue=False) - Sentiment_btn.click(self._on_sentiment_btn_click,[],[txt5,plot,plot_2,plot_3,plot_4]) - - demo.title = "Vodafone Generative AI CRM ChatBot" - demo.launch() -document_qa =LangChain_Document_QA() -document_qa.gradio_interface() diff --git a/spaces/hugggof/vampnet/vampnet/modules/activations.py b/spaces/hugggof/vampnet/vampnet/modules/activations.py deleted file mode 100644 index c013c6302d1569e9c1937915a0ee638632071e51..0000000000000000000000000000000000000000 --- a/spaces/hugggof/vampnet/vampnet/modules/activations.py +++ /dev/null @@ -1,55 +0,0 @@ -import math -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from einops import rearrange - - -class NewGELU(nn.Module): - """ - Implementation of the GELU activation function currently in Google BERT repo - (identical to OpenAI GPT). Also see the Gaussian Error Linear Units - paper: https://arxiv.org/abs/1606.08415 - """ - - def forward(self, x): - return ( - 0.5 - * x - * ( - 1.0 - + torch.tanh( - math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)) - ) - ) - ) - -class GatedGELU(nn.Module): - def __init__(self): - super().__init__() - self.gelu = NewGELU() - - def forward(self, x, dim: int = -1): - p1, p2 = x.chunk(2, dim=dim) - return p1 * self.gelu(p2) - -class Snake1d(nn.Module): - def __init__(self, channels): - super().__init__() - self.alpha = nn.Parameter(torch.ones(channels)) - - def forward(self, x): - return x + (self.alpha + 1e-9).reciprocal() * torch.sin(self.alpha * x).pow(2) - -def get_activation(name: str = "relu"): - if name == "relu": - return nn.ReLU - elif name == "gelu": - return NewGELU - elif name == "geglu": - return GatedGELU - elif name == "snake": - return Snake1d - else: - raise ValueError(f"Unrecognized activation {name}") \ No newline at end of file diff --git a/spaces/huggingface/data-measurements-tool/data_measurements/embeddings.py b/spaces/huggingface/data-measurements-tool/data_measurements/embeddings.py deleted file mode 100644 index 2d3e9e7330732b67b6b1c9f5b226cd4317a4e4b6..0000000000000000000000000000000000000000 --- a/spaces/huggingface/data-measurements-tool/data_measurements/embeddings.py +++ /dev/null @@ -1,550 +0,0 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -from os.path import exists -from os.path import join as pjoin - -import plotly.graph_objects as go -import torch -import transformers -from datasets import load_from_disk -from plotly.io import read_json -from tqdm import tqdm - -from .dataset_utils import EMBEDDING_FIELD - - -def sentence_mean_pooling(model_output, attention_mask): - """Mean pooling of token embeddings for a sentence.""" - token_embeddings = model_output[ - 0 - ] # First element of model_output contains all token embeddings - input_mask_expanded = ( - attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - ) - return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( - input_mask_expanded.sum(1), min=1e-9 - ) - - -class Embeddings: - def __init__( - self, - dstats=None, - text_dset=None, - text_field_name="text", - cache_path="", - use_cache=False, - ): - """Item embeddings and clustering""" - self.device = "cuda:0" if torch.cuda.is_available() else "cpu" - self.model_name = "sentence-transformers/all-mpnet-base-v2" - self.tokenizer = transformers.AutoTokenizer.from_pretrained(self.model_name) - self.model = transformers.AutoModel.from_pretrained(self.model_name).to( - self.device - ) - self.text_dset = text_dset if dstats is None else dstats.text_dset - self.text_field_name = ( - text_field_name if dstats is None else dstats.our_text_field - ) - self.cache_path = cache_path if dstats is None else dstats.cache_path - self.embeddings_dset_fid = pjoin(self.cache_path, "embeddings_dset") - self.embeddings_dset = None - self.node_list_fid = pjoin(self.cache_path, "node_list.th") - self.node_list = None - self.nid_map = None - self.fig_tree_fid = pjoin(self.cache_path, "node_figure.json") - self.fig_tree = None - self.cached_clusters = {} - self.use_cache = use_cache - - def compute_sentence_embeddings(self, sentences): - """ - Takes a list of sentences and computes their embeddings - using self.tokenizer and self.model (with output dimension D) - followed by mean pooling of the token representations and normalization - Args: - sentences ([string]): list of N input sentences - Returns: - torch.Tensor: sentence embeddings, dimension NxD - """ - batch = self.tokenizer( - sentences, padding=True, truncation=True, return_tensors="pt" - ) - batch = {k: v.to(self.device) for k, v in batch.items()} - with torch.no_grad(): - model_output = self.model(**batch) - sentence_embeds = sentence_mean_pooling( - model_output, batch["attention_mask"] - ) - sentence_embeds /= sentence_embeds.norm(dim=-1, keepdim=True) - return sentence_embeds - - def make_embeddings(self): - """ - Batch computes the embeddings of the Dataset self.text_dset, - using the field self.text_field_name as input. 
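- (Each batch of 32 examples is tokenized, embedded, mean-pooled and L2-normalized, then stored as plain Python lists so the resulting dataset can be written to disk.)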
- Returns: - Dataset: HF dataset object with a single EMBEDDING_FIELD field - corresponding to the embeddings (list of floats) - """ - - def batch_embed_sentences(sentences): - return { - EMBEDDING_FIELD: [ - embed.tolist() - for embed in self.compute_sentence_embeddings( - sentences[self.text_field_name] - ) - ] - } - - self.embeddings_dset = self.text_dset.map( - batch_embed_sentences, - batched=True, - batch_size=32, - remove_columns=[self.text_field_name], - ) - - return self.embeddings_dset - - def make_text_embeddings(self): - """Load embeddings dataset from cache or compute it.""" - if self.use_cache and exists(self.embeddings_dset_fid): - self.embeddings_dset = load_from_disk(self.embeddings_dset_fid) - else: - self.embeddings_dset = self.make_embeddings() - self.embeddings_dset.save_to_disk(self.embeddings_dset_fid) - - def make_hierarchical_clustering( - self, - batch_size=1000, - approx_neighbors=1000, - min_cluster_size=10, - ): - if self.use_cache and exists(self.node_list_fid): - self.node_list, self.nid_map = torch.load(self.node_list_fid) - else: - self.make_text_embeddings() - embeddings = torch.Tensor(self.embeddings_dset[EMBEDDING_FIELD]) - self.node_list = fast_cluster( - embeddings, batch_size, approx_neighbors, min_cluster_size - ) - self.nid_map = dict( - [(node["nid"], nid) for nid, node in enumerate(self.node_list)] - ) - torch.save((self.node_list, self.nid_map), self.node_list_fid) - print(exists(self.fig_tree_fid), self.fig_tree_fid) - if self.use_cache and exists(self.fig_tree_fid): - self.fig_tree = read_json(self.fig_tree_fid) - else: - self.fig_tree = make_tree_plot( - self.node_list, self.nid_map, self.text_dset, self.text_field_name - ) - self.fig_tree.write_json(self.fig_tree_fid) - - def find_cluster_beam(self, sentence, beam_size=20): - """ - This function finds the `beam_size` leaf clusters that are closest to the - proposed sentence and returns the full path from the root to the cluster - along with the dot product between the sentence embedding and the - cluster centroid - Args: - sentence (string): input sentence for which to find clusters - beam_size (int): this is a beam size algorithm to explore the tree - Returns: - [([int], float)]: list of (path_from_root, score) sorted by score - """ - embed = self.compute_sentence_embeddings([sentence])[0].to("cpu") - active_paths = [([0], torch.dot(embed, self.node_list[0]["centroid"]).item())] - finished_paths = [] - children_ids_list = [ - [ - self.nid_map[nid] - for nid in self.node_list[path[-1]]["children_ids"] - if nid in self.nid_map - ] - for path, score in active_paths - ] - while len(active_paths) > 0: - next_ids = sorted( - [ - ( - beam_id, - nid, - torch.dot(embed, self.node_list[nid]["centroid"]).item(), - ) - for beam_id, children_ids in enumerate(children_ids_list) - for nid in children_ids - ], - key=lambda x: x[2], - reverse=True, - )[:beam_size] - paths = [ - (active_paths[beam_id][0] + [next_id], score) - for beam_id, next_id, score in next_ids - ] - active_paths = [] - for path, score in paths: - if ( - len( - [ - nid - for nid in self.node_list[path[-1]]["children_ids"] - if nid in self.nid_map - ] - ) - > 0 - ): - active_paths += [(path, score)] - else: - finished_paths += [(path, score)] - children_ids_list = [ - [ - self.nid_map[nid] - for nid in self.node_list[path[-1]]["children_ids"] - if nid in self.nid_map - ] - for path, score in active_paths - ] - return sorted( - finished_paths, - key=lambda x: x[-1], - reverse=True, - )[:beam_size] - - -def prepare_merges(embeddings, 
batch_size=1000, approx_neighbors=1000, low_thres=0.5): - """ - Prepares an initial list of merges for hierarchical - clustering. First compute the `approx_neighbors` nearest neighbors, - then propose a merge for any two points that are closer than `low_thres` - - Note that if a point has more than `approx_neighbors` neighbors - closer than `low_thres`, this approach will miss some of those merges - - Args: - embeddings (torch.Tensor): Tensor of sentence embeddings - dimension NxD - batch_size (int): compute nearest neighbors of `batch_size` points at a time - approx_neighbors (int): only keep `approx_neighbors` nearest neighbors of a point - low_thres (float): only return merges where the dot product is greater than `low_thres` - Returns: - torch.LongTensor: proposed merges ([i, j] with i>j) - dimension: Mx2 - torch.Tensor: merge scores - dimension M - """ - top_idx_pre = torch.cat( - [torch.LongTensor(range(embeddings.shape[0]))[:, None]] * batch_size, dim=1 - ) - top_val_all = torch.Tensor(0, approx_neighbors) - top_idx_all = torch.LongTensor(0, approx_neighbors) - n_batches = math.ceil(len(embeddings) / batch_size) - for b in tqdm(range(n_batches)): - # TODO: batch across second dimension - cos_scores = torch.mm( - embeddings[b * batch_size : (b + 1) * batch_size], embeddings.t() - ) - for i in range(cos_scores.shape[0]): - cos_scores[i, (b * batch_size) + i :] = -1 - top_val_large, top_idx_large = cos_scores.topk( - k=approx_neighbors, dim=-1, largest=True - ) - top_val_all = torch.cat([top_val_all, top_val_large], dim=0) - top_idx_all = torch.cat([top_idx_all, top_idx_large], dim=0) - max_neighbor_dist = top_val_large[:, -1].max().item() - if max_neighbor_dist > low_thres: - print( - f"WARNING: with the current set of nearest neighbors, the farthest is {max_neighbor_dist}" - ) - - all_merges = torch.cat( - [ - top_idx_pre[top_val_all > low_thres][:, None], - top_idx_all[top_val_all > low_thres][:, None], - ], - dim=1, - ) - all_merge_scores = top_val_all[top_val_all > low_thres] - - return (all_merges, all_merge_scores) - - -def merge_nodes(nodes, current_thres, previous_thres, all_merges, all_merge_scores): - """ - Merge all nodes if the max dot product between any of their descendants - is greater than current_thres. 
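- Merging always happens between the current roots of the two points' subtrees (found by following parent_id links), so chains of merges within one threshold band collapse into a single node.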
- - Args: - nodes ([dict]): list of dicts representing the current set of nodes - current_thres (float): merge all nodes closer than current_thres - previous_thres (float): nodes closer than previous_thres are already merged - all_merges (torch.LongTensor): proposed merges ([i, j] with i>j) - dimension: Mx2 - all_merge_scores (torch.Tensor): merge scores - dimension M - Returns: - [dict]: extended list with the newly created internal nodes - """ - merge_ids = (all_merge_scores <= previous_thres) * ( - all_merge_scores > current_thres - ) - if merge_ids.sum().item() > 0: - merges = all_merges[merge_ids] - for a, b in merges.tolist(): - node_a = nodes[a] - while node_a["parent_id"] != -1: - node_a = nodes[node_a["parent_id"]] - node_b = nodes[b] - while node_b["parent_id"] != -1: - node_b = nodes[node_b["parent_id"]] - if node_a["nid"] == node_b["nid"]: - continue - else: - # merge if threshold allows - if (node_a["depth"] + node_b["depth"]) > 0 and min( - node_a["merge_threshold"], node_b["merge_threshold"] - ) == current_thres: - merge_to = None - merge_from = None - if node_a["nid"] < node_b["nid"]: - merge_from = node_a - merge_to = node_b - if node_a["nid"] > node_b["nid"]: - merge_from = node_b - merge_to = node_a - merge_to["depth"] = max(merge_to["depth"], merge_from["depth"]) - merge_to["weight"] += merge_from["weight"] - merge_to["children_ids"] += ( - merge_from["children_ids"] - if merge_from["depth"] > 0 - else [merge_from["nid"]] - ) - for cid in merge_from["children_ids"]: - nodes[cid]["parent_id"] = merge_to["nid"] - merge_from["parent_id"] = merge_to["nid"] - # else new node - else: - new_nid = len(nodes) - new_node = { - "nid": new_nid, - "parent_id": -1, - "depth": max(node_a["depth"], node_b["depth"]) + 1, - "weight": node_a["weight"] + node_b["weight"], - "children": [], - "children_ids": [node_a["nid"], node_b["nid"]], - "example_ids": [], - "merge_threshold": current_thres, - } - node_a["parent_id"] = new_nid - node_b["parent_id"] = new_nid - nodes += [new_node] - return nodes - - -def finalize_node(node, nodes, min_cluster_size): - """Post-process nodes to sort children by descending weight, - get full list of leaves in the sub-tree, and direct links - to the children nodes, then recurses to all children. - - Nodes with fewer than `min_cluster_size` descendants are collapsed - into a single leaf. - """ - node["children"] = sorted( - [ - finalize_node(nodes[cid], nodes, min_cluster_size) - for cid in node["children_ids"] - ], - key=lambda x: x["weight"], - reverse=True, - ) - if node["depth"] > 0: - node["example_ids"] = [ - eid for child in node["children"] for eid in child["example_ids"] - ] - node["children"] = [ - child for child in node["children"] if child["weight"] >= min_cluster_size - ] - assert node["weight"] == len(node["example_ids"]), print(node) - return node - - -def fast_cluster( - embeddings, - batch_size=1000, - approx_neighbors=1000, - min_cluster_size=10, - low_thres=0.5, -): - """ - Computes an approximate hierarchical clustering based on example - embeddings. The join criterion is min clustering, i.e. 
two clusters - are joined if any pair of their descendants is closer than a threshold - - The approximation comes from the fact that only the `approx_neighbors` nearest - neighbors of an example are considered for merges - """ - batch_size = min(embeddings.shape[0], batch_size) - all_merges, all_merge_scores = prepare_merges( - embeddings, batch_size, approx_neighbors, low_thres - ) - # prepare leaves - nodes = [ - { - "nid": nid, - "parent_id": -1, - "depth": 0, - "weight": 1, - "children": [], - "children_ids": [], - "example_ids": [nid], - "merge_threshold": 1.0, - } - for nid in range(embeddings.shape[0]) - ] - # one level per threshold range - for i in range(10): - p_thres = 1 - i * 0.05 - c_thres = 0.95 - i * 0.05 - nodes = merge_nodes(nodes, c_thres, p_thres, all_merges, all_merge_scores) - # make root - root_children = [ - node - for node in nodes - if node["parent_id"] == -1 and node["weight"] >= min_cluster_size - ] - root = { - "nid": len(nodes), - "parent_id": -1, - "depth": max([node["depth"] for node in root_children]) + 1, - "weight": sum([node["weight"] for node in root_children]), - "children": [], - "children_ids": [node["nid"] for node in root_children], - "example_ids": [], - "merge_threshold": -1.0, - } - nodes += [root] - for node in root_children: - node["parent_id"] = root["nid"] - # finalize tree - tree = finalize_node(root, nodes, min_cluster_size) - node_list = [] - - def rec_map_nodes(node, node_list): - node_list += [node] - for child in node["children"]: - rec_map_nodes(child, node_list) - - rec_map_nodes(tree, node_list) - # get centroids and distances - for node in node_list: - node_embeds = embeddings[node["example_ids"]] - node["centroid"] = node_embeds.sum(dim=0) - node["centroid"] /= node["centroid"].norm() - node["centroid_dot_prods"] = torch.mv(node_embeds, node["centroid"]) - node["sorted_examples_centroid"] = sorted( - [ - (eid, edp.item()) - for eid, edp in zip(node["example_ids"], node["centroid_dot_prods"]) - ], - key=lambda x: x[1], - reverse=True, - ) - return node_list - - -def make_tree_plot(node_list, nid_map, text_dset, text_field_name): - """ - Makes a graphical representation of the tree encoded - in node_list. The hover label for each node shows the number - of descendants and the 5 examples that are closest to the centroid - """ - for nid, node in enumerate(node_list): - # get the examples closest to the centroid - node_examples = {} - for sid, score in node["sorted_examples_centroid"]: - node_examples[text_dset[sid][text_field_name]] = score - if len(node_examples) >= 5: - break - node["label"] = node.get( - "label", - f"{nid:2d} - {node['weight']:5d} items
              " - + "
              ".join( - [ - f" {score:.2f} > {txt[:64]}" + ("..." if len(txt) >= 63 else "") - for txt, score in node_examples.items() - ] - ), - ) - - # make plot nodes - labels = [node["label"] for node in node_list] - - root = node_list[0] - root["X"] = 0 - root["Y"] = 0 - - def rec_make_coordinates(node): - total_weight = 0 - add_weight = len(node["example_ids"]) - sum( - [child["weight"] for child in node["children"]] - ) - for child in node["children"]: - child["X"] = node["X"] + total_weight - child["Y"] = node["Y"] - 1 - total_weight += child["weight"] + add_weight / len(node["children"]) - rec_make_coordinates(child) - - rec_make_coordinates(root) - - E = [] # list of edges - Xn = [] - Yn = [] - Xe = [] - Ye = [] - for nid, node in enumerate(node_list): - Xn += [node["X"]] - Yn += [node["Y"]] - for child in node["children"]: - E += [(nid, nid_map[child["nid"]])] - Xe += [node["X"], child["X"], None] - Ye += [node["Y"], child["Y"], None] - - # make figure - fig = go.Figure() - fig.add_trace( - go.Scatter( - x=Xe, - y=Ye, - mode="lines", - line=dict(color="rgb(210,210,210)", width=1), - hoverinfo="none", - ) - ) - fig.add_trace( - go.Scatter( - x=Xn, - y=Yn, - mode="markers", - name="nodes", - marker=dict( - symbol="circle-dot", - size=18, - color="#6175c1", - line=dict(color="rgb(50,50,50)", width=1) - # '#DB4551', - ), - text=labels, - hoverinfo="text", - opacity=0.8, - ) - ) - return fig diff --git a/spaces/hugginglearners/brain-tumor-detection-mri/app.py b/spaces/hugginglearners/brain-tumor-detection-mri/app.py deleted file mode 100644 index 23049309de2463bbf33c1bdc9f6594b967657d03..0000000000000000000000000000000000000000 --- a/spaces/hugginglearners/brain-tumor-detection-mri/app.py +++ /dev/null @@ -1,41 +0,0 @@ -import gradio as gr -from fastai.vision.all import * -from huggingface_hub import from_pretrained_fastai - -#repo_id = "hugginglearners/brain-tumor-detection-mri" -learn = from_pretrained_fastai("hugginglearners/brain-tumor-detection-mri") -labels = learn.dls.vocab - -def predict(img): - img = PILImage.create(img) - _pred, _pred_w_idx, probs = learn.predict(img) - # gradio doesn't support tensors, so converting to float - labels_probs = {labels[i]: float(probs[i]) for i, _ in enumerate(labels)} - return labels_probs - -interface_options = { - "title": "Brain tumor detection for MRI images", - "description": "For reference only. 
Should **not** be used for medical diagnosis", - "interpretation": "default", - "layout": "horizontal", - "examples": [ - "no 89.jpg", - "Y22.jpg" - ], - "allow_flagging": "never", -} - -demo = gr.Interface( - fn=predict, - inputs=gr.inputs.Image(shape=(480, 480)), - outputs=gr.outputs.Label(num_top_classes=2), - cache_examples=False, - **interface_options, -) - -launch_options = { - "enable_queue": True, - "share": False, -} - -demo.launch(**launch_options) \ No newline at end of file diff --git a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_pfc02_r100.py b/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_pfc02_r100.py deleted file mode 100644 index 72f0f0ec0ce5c523bace8b7869181ea807e72423..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/wf12m_pfc02_r100.py +++ /dev/null @@ -1,28 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.margin_list = (1.0, 0.0, 0.4) -config.network = "r100" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 0.2 -config.interclass_filtering_threshold = 0 -config.fp16 = True -config.weight_decay = 5e-4 -config.batch_size = 128 -config.optimizer = "sgd" -config.lr = 0.1 -config.verbose = 2000 -config.dali = False - -config.rec = "/train_tmp/WebFace12M" -config.num_classes = 617970 -config.num_image = 12720066 -config.num_epoch = 20 -config.warmup_epoch = 0 -config.val_targets = [] diff --git a/spaces/iamlonely/destroylonely/Dockerfile b/spaces/iamlonely/destroylonely/Dockerfile deleted file mode 100644 index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/iamlonely/destroylonely/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git a/spaces/inamXcontru/PoeticTTS/Adobe After Effects Keygen Cs6.md b/spaces/inamXcontru/PoeticTTS/Adobe After Effects Keygen Cs6.md deleted file mode 100644 index 99429db8be8c7763ef4d7e00f7dce5de2bcccec0..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Adobe After Effects Keygen Cs6.md +++ /dev/null @@ -1,6 +0,0 @@ -

              adobe after effects keygen cs6


              Download >>>>> https://gohhs.com/2uz3s6



              - -Adobe After Effects CC - Adobe Animate CC 2017 - Adobe Animate CC 2015 ... Adobe Photoshop CC 2018 Serial Number 2018 + Key (Keygen, License) Activator. ... Amtlib Dll Crack with Patch for Adobe Master Collection CS6 Free is a touch ... 4d29de3e1b
              -
              -
              -

              diff --git a/spaces/inamXcontru/PoeticTTS/Detonate 1.2 Full Version Free.md b/spaces/inamXcontru/PoeticTTS/Detonate 1.2 Full Version Free.md deleted file mode 100644 index ba57ead1a8120b2ff91d93de0defe4a938c4978c..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Detonate 1.2 Full Version Free.md +++ /dev/null @@ -1,10 +0,0 @@ -

              Detonate 1.2 Full Version Free


              Download Filehttps://gohhs.com/2uz3bk



              - -Detonate 1.2 Full Version Free - DOWNLOAD: - detonate meaning, detonate, detonated, detonate dead poe, detonate là gì, detonate ... TotallyFreeDownloads.com - Free Software and ... -- Free Downloads & Software - TotallyFreeDownloads.com - Free Software and Games - Download free software and apps -detonate 1.2 full version free download - Detonate 1.2 Full Version Free - DOWNLOAD: - detonate meaning, detonate, detonated, detonate dead poe, detonate là gì, detonate ... -All-new, fully compatible with the latest versions of Android and Android 4 ... DOWNLOAD - -Detonate 1.2 Full Version Free Download 8a78ff9644
              -
              -
              -

              diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/ABCD Any Body Can Dance 2 English Dubbed Torrent Download.md b/spaces/inplisQlawa/anything-midjourney-v4-1/ABCD Any Body Can Dance 2 English Dubbed Torrent Download.md deleted file mode 100644 index 3676c7c00ff1e432218e2524f47ce31cd305b395..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/ABCD Any Body Can Dance 2 English Dubbed Torrent Download.md +++ /dev/null @@ -1,6 +0,0 @@ -
              -

              ABCD Any Body Can Dance 2. TV Shows.. I really enjoy this movie and I want to share it with everyone. Find all the latest TV Shows online. Time in miliseconds. 34.1 77.0 -.3 0.0. 1st Step.. This is the best dance song by Shagun in her very first movie 'ABCD 2'. Write your name in the comment box to download it for free. ABCD Any Body Can Dance 2 - আবার একটা জন্য বাজে 2 ডাইভ অফ কে কারণে. ABCD Any Body Can Dance 2 - আবার একটা জন্য বাজে 2 ডাইভ অফ কে কারণে. . Song Chives is the first video music site to combine fan driven charts and music videos into a single consistent music video site. Download 'Beauty And The Beast' Game King Game and many more programs here! Boys riding giraffes 2nd episode 01 torrent. . Kalam kal khair ho 1 english audio. Comedian who writes humourous monologues and story was teaching dance to the elderly..

              -

              ABCD Any Body Can Dance 2 English Dubbed Torrent Download


              Download ……… https://urlin.us/2uEyb5



              -

              Find all the latest TV Shows online. Download the official movie music video on Gaana for 'Haseena Maathiri (Unreleased)' by Manikanth, Gaana. Watch the official music video for The Way I Are (Dance With Somebody) by Bebe Rexha feat. Lil Wayne from the album All Your Fault: Pt. 2. . Zameen kaun hai title song 2016. Hindi youtube mobile - ABCD Any Body Can Dance 2: Directed by Remo D'Souza. With Prabhu Deva, Varun Dhawan, Shraddha Kapoor, Lauren Gottlieb. Based on the true story of the Fictitious. Anybody can dance (ABCD) - download torrent torrents - download Anybody can dance 2 torrent. Anybody can dance 2: Directed by Remo D'Souza. With Prabhu Deva, Ganesh Acharya, Kay Kay Menon, Lauren Gottlieb. When a capable dancer is provoked by. Whether it's sports, drama or even education shows on TV, they all use this song in a sappy love story kind of way. . Download 'Beauty And The Beast' Game King Game and many more programs here!. Home; Bollywood Top List; Tamil; English; Kannada; Hindi; Hindi Movie. Top List. . Eve song tumse hum sab aata hai english duet song, sung by Sayantika. Any Body Can Dance 2 - আবার একটা জন্য বাজে 2 ডাইভ অফ কে কারণে. .

              899543212b
              -
              -
              \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Audio Assault Druminator 1.0.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Audio Assault Druminator 1.0.md deleted file mode 100644 index 835c9b379733a238c3a963dd41811ba08c67e3c0..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Audio Assault Druminator 1.0.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Audio Assault Druminator 1.0


              Download Ziphttps://urlin.us/2uExad



              -
              -Westwood Drums by Audio Assault is a virtual instrument audio plugin for macOS and Windows. It works as a VST Plugin, Audio Units Plugin, VST 3 Plugin, ... and has a fully customizable interface. With the plugin, you can mix bass, drums and other instruments with virtual synths. This makes the sound more realistic. Westwood Drums has an easy setup that allows you to create your own sound palette from pre-made templates or templates made for you. You can add up to eight different drum machines, virtual basses and bass synths. 8a78ff9644
              -
              -
              -

              diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (vasanthathil Oru Naal Tamil Movie Mp).md b/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (vasanthathil Oru Naal Tamil Movie Mp).md deleted file mode 100644 index 10f39e2b41c49b42fe0b7afced2257e9116775b5..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (vasanthathil Oru Naal Tamil Movie Mp).md +++ /dev/null @@ -1,6 +0,0 @@ -

              HD Online Player (vasanthathil oru naal tamil movie mp)


              Download ———>>> https://urlin.us/2uExFf



              -
              -... free online. Play P. Jayachandran Tamil MP3 songs or download P. Jayachandran latest MP3 from songs list and all Tamil music album online on Gaana.com. 1fdad05405
              -
              -
              -

              diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Hitman Absolution Keygen PASSWORD.full.20.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Hitman Absolution Keygen PASSWORD.full.20.md deleted file mode 100644 index 6031c7b2e63ff2be03e205548962001b0a1cfc44..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Hitman Absolution Keygen PASSWORD.full.20.md +++ /dev/null @@ -1,32 +0,0 @@ - -

              How to Download and Play Hitman Absolution with Keygen PASSWORD.full.20

              - -

              Hitman Absolution is a stealth action game that was released in 2012 by Square Enix. It follows the story of Agent 47, a professional assassin who is betrayed by his agency and hunted by the police. The game features a variety of missions, locations and weapons, as well as a mode called Contracts, where players can create and share their own custom scenarios.

              -

              Hitman Absolution Keygen PASSWORD.full.20


              Download Ziphttps://urlin.us/2uEwvy



              - -

              What is Keygen PASSWORD.full.20?

              - -

              Keygen PASSWORD.full.20 is a code that can be used to unlock the full version of Hitman Absolution for free. It is a crack that bypasses the security system of the game and allows players to install and play it without any restrictions. However, using Keygen PASSWORD.full.20 is illegal and risky, as it may contain malware or viruses that can harm your device or compromise your personal data.

              - -

              How to Download and Play Hitman Absolution with Keygen PASSWORD.full.20?

              - -

              If you still want to download and play Hitman Absolution with Keygen PASSWORD.full.20, you need to follow these steps:

              - -
                -
              1. Find a reliable website that offers Hitman Absolution Full Game with Crack and Keygen PASSWORD.full.20. You can search for it on Google or use some of the links provided in this article.
              2. -
              3. Download the game file and the crack file from the website. Make sure to scan them with an antivirus program before opening them.
              4. -
              5. Extract the game file and the crack file using a program like WinRAR or 7-Zip.
              6. -
              7. Copy the crack file and paste it into the game folder, replacing the original file.
              8. -
              9. Run the game as administrator and enter Keygen PASSWORD.full.20 when prompted.
              10. -
              11. Enjoy playing Hitman Absolution for free.
              12. -
              - -

              Conclusion

              - -

              Hitman Absolution is a great game that will challenge your skills and creativity as an assassin. However, downloading and playing it with Keygen PASSWORD.full.20 is not recommended, as it is illegal and unsafe. You should support the developers and buy the game legally from an official source, such as Steam or Origin.

              -


              -

              3cee63e6c2
              -
              -
              \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Adobe Photoshop CS3 Crack - Infinite Pirate Serial Key REPACK.md b/spaces/inreVtussa/clothingai/Examples/Adobe Photoshop CS3 Crack - Infinite Pirate Serial Key REPACK.md deleted file mode 100644 index 72d007462c30a6570957132206c04b55f8e51653..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Adobe Photoshop CS3 Crack - Infinite Pirate Serial Key REPACK.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Adobe Photoshop CS3 Crack - Infinite Pirate Serial Key


              DOWNLOAD →→→ https://tiurll.com/2uClbP



              -
              -Dreamweaver cs6 amtlib osx crack dll; Adobe Audition 1.5 Tpb -; Posts navigation; [D.o.w.`LOAD] Audition ... Photoshop CS6 PirateBay – torrent download. ... Download Infiniteskills – Illustrator CS5. ... Pirate adobe cc mac. ... After Effects CS3. 1fdad05405
              -
              -
              -

              diff --git a/spaces/inreVtussa/clothingai/Examples/Chemistry Form 5 Module Nilam Publication Zip.md b/spaces/inreVtussa/clothingai/Examples/Chemistry Form 5 Module Nilam Publication Zip.md deleted file mode 100644 index 273fe7d87056a5b9c315e392764e56b8c4ec58e7..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Chemistry Form 5 Module Nilam Publication Zip.md +++ /dev/null @@ -1,9 +0,0 @@ -
              -


              -

              The whole procedure can last less than a minute. You can download the signed [Form] to your device or share it with other parties involved with a link or by email, as a result. Because of its cross-platform nature, signNow works on any device and any operating system. Select our signature tool and leave behind the old days with efficiency, affordability and security.

              -

              Chemistry Form 5 Module Nilam Publication Zip


              DOWNLOAD > https://tiurll.com/2uCm5B



              -

              Swiftly produce a Nilam Publication Chemistry Module Answer Form 4 without needing to involve experts. There are already more than 3 million people taking advantage of our rich catalogue of legal documents. Join us right now and get access to the top catalogue of browser-based templates. Try it yourself!

              -


              -

              why not try us today? we have a team of professional writers who have vast experience in writing unique and quality content. our proofreader and editor free of cost. we offer genuine quality content, which means that you will not find plagiarism.

              899543212b
              -
              -
              \ No newline at end of file diff --git a/spaces/ismot/1702t1/models/modules/transformer.py b/spaces/ismot/1702t1/models/modules/transformer.py deleted file mode 100644 index f3d2aa093c748bbc1408491cacab153977b4a4cb..0000000000000000000000000000000000000000 --- a/spaces/ismot/1702t1/models/modules/transformer.py +++ /dev/null @@ -1,44 +0,0 @@ -from models.modules.transformer_modules import * - - -class Transformer(nn.Module): - def __init__(self, dim, depth, heads, win_size, dim_head, mlp_dim, - dropout=0., patch_num=None, ape=None, rpe=None, rpe_pos=1): - super().__init__() - - self.absolute_pos_embed = None if patch_num is None or ape is None else AbsolutePosition(dim, dropout, - patch_num, ape) - self.pos_dropout = nn.Dropout(dropout) - self.layers = nn.ModuleList([]) - for _ in range(depth): - self.layers.append(nn.ModuleList([ - PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout, patch_num=patch_num, - rpe=rpe, rpe_pos=rpe_pos)), - PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)) - ])) - - def forward(self, x): - if self.absolute_pos_embed is not None: - x = self.absolute_pos_embed(x) - x = self.pos_dropout(x) - for attn, ff in self.layers: - x = attn(x) + x - x = ff(x) + x - return x - - -if __name__ == '__main__': - token_dim = 1024 - toke_len = 256 - - transformer = Transformer(dim=token_dim, depth=6, heads=16, - dim_head=64, mlp_dim=2048, dropout=0.1, - patch_num=256, ape='lr_parameter', rpe='lr_parameter_mirror') - - total = sum(p.numel() for p in transformer.parameters()) - trainable = sum(p.numel() for p in transformer.parameters() if p.requires_grad) - print('parameter total:{:,}, trainable:{:,}'.format(total, trainable)) - - input = torch.randn(1, toke_len, token_dim) - output = transformer(input) - print(output.shape) diff --git a/spaces/j10sanders/rubber-duck/__init__.py b/spaces/j10sanders/rubber-duck/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/jarvisbot/ChatImprovement/functional_crazy.py b/spaces/jarvisbot/ChatImprovement/functional_crazy.py deleted file mode 100644 index 51e56524d244ee1b7d5e83b27569a5763b2dd1b7..0000000000000000000000000000000000000000 --- a/spaces/jarvisbot/ChatImprovement/functional_crazy.py +++ /dev/null @@ -1,36 +0,0 @@ - -def get_crazy_functionals(): - from crazy_functions.读文章写摘要 import 读文章写摘要 - from crazy_functions.生成函数注释 import 批量生成函数注释 - from crazy_functions.解析项目源代码 import 解析项目本身 - from crazy_functions.解析项目源代码 import 解析一个Python项目 - from crazy_functions.解析项目源代码 import 解析一个C项目的头文件 - from crazy_functions.高级功能函数模板 import 高阶功能模板函数 - - return { - "[实验] 请解析并解构此项目本身": { - "Function": 解析项目本身 - }, - "[实验] 解析整个py项目(input输入项目根路径)": { - "Color": "stop", # 按钮颜色 - "Function": 解析一个Python项目 - }, - "[实验] 解析整个C++项目(input输入项目根路径)": { - "Color": "stop", # 按钮颜色 - "Function": 解析一个C项目的头文件 - }, - "[实验] 读tex论文写摘要(input输入项目根路径)": { - "Color": "stop", # 按钮颜色 - "Function": 读文章写摘要 - }, - "[实验] 批量生成函数注释(input输入项目根路径)": { - "Color": "stop", # 按钮颜色 - "Function": 批量生成函数注释 - }, - "[实验] 实验功能函数模板": { - "Color": "stop", # 按钮颜色 - "Function": 高阶功能模板函数 - }, - } - - diff --git a/spaces/jbilcke-hf/observer/tailwind.config.js b/spaces/jbilcke-hf/observer/tailwind.config.js deleted file mode 100644 index ce2783d5277b5c05378042e0a47eed675e99b606..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/observer/tailwind.config.js +++ /dev/null @@ -1,46 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - darkMode: 
["class"], - content: [ - './pages/**/*.{ts,tsx}', - './components/**/*.{ts,tsx}', - './app/**/*.{ts,tsx}', - './src/**/*.{ts,tsx}', - './src/lib/fonts.ts' - ], - theme: { - container: { - center: true, - padding: "2rem", - screens: { - "2xl": "1400px", - }, - }, - extend: { - fontFamily: { - sans: ['var(--font-inter)'], - edu: ['var(--font-edu)'], - orbitron: ['var(--font-orbitron)'], - amatic: ['var(--font-amatic)'], - macondo: ['var(--font-macondo)'], - imfell: ['var(--font-imfell)'], - lugrasimo: ['var(--font-lugrasimo)'], - }, - keyframes: { - "accordion-down": { - from: { height: 0 }, - to: { height: "var(--radix-accordion-content-height)" }, - }, - "accordion-up": { - from: { height: "var(--radix-accordion-content-height)" }, - to: { height: 0 }, - }, - }, - animation: { - "accordion-down": "accordion-down 0.2s ease-out", - "accordion-up": "accordion-up 0.2s ease-out", - }, - }, - }, - plugins: [require("tailwindcss-animate")], -} \ No newline at end of file diff --git a/spaces/jbochi/madlad400-3b-mt/app.py b/spaces/jbochi/madlad400-3b-mt/app.py deleted file mode 100644 index 06c931011744dfb7bf842d3e14dcbddb79525b5e..0000000000000000000000000000000000000000 --- a/spaces/jbochi/madlad400-3b-mt/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import time - -from transformers import T5ForConditionalGeneration, T5Tokenizer, GenerationConfig -import gradio as gr - -MODEL_NAME = "jbochi/madlad400-3b-mt" - -print(f"Loading {MODEL_NAME} tokenizer...") -tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME) -print(f"Loading {MODEL_NAME} model...") -model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME, device_map="auto") - - -def inference(input_text, target_language, max_length): - global model, tokenizer - start_time = time.time() - input_ids = tokenizer( - f"<2{target_language}> {input_text}", return_tensors="pt" - ).input_ids - outputs = model.generate( - input_ids=input_ids.to(model.device), - generation_config=GenerationConfig(max_length=max_length), - ) - result = tokenizer.decode(outputs[0], skip_special_tokens=True) - end_time = time.time() - result = { - 'result': result, - 'inference_time': end_time - start_time, - 'input_token_ids': input_ids[0].tolist(), - 'output_token_ids': outputs[0].tolist(), - } - return result - - -def run(): - tokens = [tokenizer.decode(i) for i in range(500)] - lang_codes = [token[2:-1] for token in tokens if token.startswith("<2")] - inputs = [ - gr.components.Textbox(lines=5, label="Input text"), - gr.components.Dropdown(lang_codes, value="en", label="Target Language"), - gr.components.Slider( - minimum=5, - maximum=128, - value=50, - label="Max length", - ), - ] - examples = [ - ["I'm a mad lad!", "es", 50], - ["千里之行,始於足下", "en", 50], - ] - outputs = gr.components.JSON() - title = f"{MODEL_NAME} demo" - demo_status = "Demo is running on CPU" - description = f"Details: https://huggingface.co/{MODEL_NAME}. {demo_status}" - gr.Interface( - inference, - inputs, - outputs, - title=title, - description=description, - examples=examples, - ).launch() - - -if __name__ == "__main__": - run() diff --git a/spaces/jiejiejie0420/bingo/src/lib/bots/bing/sr.ts b/spaces/jiejiejie0420/bingo/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/jiejiejie0420/bingo/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? 
( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ExifTags.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ExifTags.py deleted file mode 100644 index 2347c6d4c2768b6c946a386bba9f1325ed91193f..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ExifTags.py +++ /dev/null @@ -1,380 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# EXIF tags -# -# Copyright (c) 2003 by Secret Labs AB -# -# See the README file for information on usage and redistribution. -# - -""" -This module provides constants and clear-text names for various -well-known EXIF tags. 
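-Two lookup dicts, TAGS and GPSTAGS, map numeric tag ids to readable names, and the IntEnum classes below let code refer to tags symbolically.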
-""" - -from enum import IntEnum - - -class Base(IntEnum): - # possibly incomplete - InteropIndex = 0x0001 - ProcessingSoftware = 0x000B - NewSubfileType = 0x00FE - SubfileType = 0x00FF - ImageWidth = 0x0100 - ImageLength = 0x0101 - BitsPerSample = 0x0102 - Compression = 0x0103 - PhotometricInterpretation = 0x0106 - Thresholding = 0x0107 - CellWidth = 0x0108 - CellLength = 0x0109 - FillOrder = 0x010A - DocumentName = 0x010D - ImageDescription = 0x010E - Make = 0x010F - Model = 0x0110 - StripOffsets = 0x0111 - Orientation = 0x0112 - SamplesPerPixel = 0x0115 - RowsPerStrip = 0x0116 - StripByteCounts = 0x0117 - MinSampleValue = 0x0118 - MaxSampleValue = 0x0119 - XResolution = 0x011A - YResolution = 0x011B - PlanarConfiguration = 0x011C - PageName = 0x011D - FreeOffsets = 0x0120 - FreeByteCounts = 0x0121 - GrayResponseUnit = 0x0122 - GrayResponseCurve = 0x0123 - T4Options = 0x0124 - T6Options = 0x0125 - ResolutionUnit = 0x0128 - PageNumber = 0x0129 - TransferFunction = 0x012D - Software = 0x0131 - DateTime = 0x0132 - Artist = 0x013B - HostComputer = 0x013C - Predictor = 0x013D - WhitePoint = 0x013E - PrimaryChromaticities = 0x013F - ColorMap = 0x0140 - HalftoneHints = 0x0141 - TileWidth = 0x0142 - TileLength = 0x0143 - TileOffsets = 0x0144 - TileByteCounts = 0x0145 - SubIFDs = 0x014A - InkSet = 0x014C - InkNames = 0x014D - NumberOfInks = 0x014E - DotRange = 0x0150 - TargetPrinter = 0x0151 - ExtraSamples = 0x0152 - SampleFormat = 0x0153 - SMinSampleValue = 0x0154 - SMaxSampleValue = 0x0155 - TransferRange = 0x0156 - ClipPath = 0x0157 - XClipPathUnits = 0x0158 - YClipPathUnits = 0x0159 - Indexed = 0x015A - JPEGTables = 0x015B - OPIProxy = 0x015F - JPEGProc = 0x0200 - JpegIFOffset = 0x0201 - JpegIFByteCount = 0x0202 - JpegRestartInterval = 0x0203 - JpegLosslessPredictors = 0x0205 - JpegPointTransforms = 0x0206 - JpegQTables = 0x0207 - JpegDCTables = 0x0208 - JpegACTables = 0x0209 - YCbCrCoefficients = 0x0211 - YCbCrSubSampling = 0x0212 - YCbCrPositioning = 0x0213 - ReferenceBlackWhite = 0x0214 - XMLPacket = 0x02BC - RelatedImageFileFormat = 0x1000 - RelatedImageWidth = 0x1001 - RelatedImageLength = 0x1002 - Rating = 0x4746 - RatingPercent = 0x4749 - ImageID = 0x800D - CFARepeatPatternDim = 0x828D - BatteryLevel = 0x828F - Copyright = 0x8298 - ExposureTime = 0x829A - FNumber = 0x829D - IPTCNAA = 0x83BB - ImageResources = 0x8649 - ExifOffset = 0x8769 - InterColorProfile = 0x8773 - ExposureProgram = 0x8822 - SpectralSensitivity = 0x8824 - GPSInfo = 0x8825 - ISOSpeedRatings = 0x8827 - OECF = 0x8828 - Interlace = 0x8829 - TimeZoneOffset = 0x882A - SelfTimerMode = 0x882B - SensitivityType = 0x8830 - StandardOutputSensitivity = 0x8831 - RecommendedExposureIndex = 0x8832 - ISOSpeed = 0x8833 - ISOSpeedLatitudeyyy = 0x8834 - ISOSpeedLatitudezzz = 0x8835 - ExifVersion = 0x9000 - DateTimeOriginal = 0x9003 - DateTimeDigitized = 0x9004 - OffsetTime = 0x9010 - OffsetTimeOriginal = 0x9011 - OffsetTimeDigitized = 0x9012 - ComponentsConfiguration = 0x9101 - CompressedBitsPerPixel = 0x9102 - ShutterSpeedValue = 0x9201 - ApertureValue = 0x9202 - BrightnessValue = 0x9203 - ExposureBiasValue = 0x9204 - MaxApertureValue = 0x9205 - SubjectDistance = 0x9206 - MeteringMode = 0x9207 - LightSource = 0x9208 - Flash = 0x9209 - FocalLength = 0x920A - Noise = 0x920D - ImageNumber = 0x9211 - SecurityClassification = 0x9212 - ImageHistory = 0x9213 - TIFFEPStandardID = 0x9216 - MakerNote = 0x927C - UserComment = 0x9286 - SubsecTime = 0x9290 - SubsecTimeOriginal = 0x9291 - SubsecTimeDigitized = 0x9292 - AmbientTemperature = 0x9400 
- Humidity = 0x9401 - Pressure = 0x9402 - WaterDepth = 0x9403 - Acceleration = 0x9404 - CameraElevationAngle = 0x9405 - XPTitle = 0x9C9B - XPComment = 0x9C9C - XPAuthor = 0x9C9D - XPKeywords = 0x9C9E - XPSubject = 0x9C9F - FlashPixVersion = 0xA000 - ColorSpace = 0xA001 - ExifImageWidth = 0xA002 - ExifImageHeight = 0xA003 - RelatedSoundFile = 0xA004 - ExifInteroperabilityOffset = 0xA005 - FlashEnergy = 0xA20B - SpatialFrequencyResponse = 0xA20C - FocalPlaneXResolution = 0xA20E - FocalPlaneYResolution = 0xA20F - FocalPlaneResolutionUnit = 0xA210 - SubjectLocation = 0xA214 - ExposureIndex = 0xA215 - SensingMethod = 0xA217 - FileSource = 0xA300 - SceneType = 0xA301 - CFAPattern = 0xA302 - CustomRendered = 0xA401 - ExposureMode = 0xA402 - WhiteBalance = 0xA403 - DigitalZoomRatio = 0xA404 - FocalLengthIn35mmFilm = 0xA405 - SceneCaptureType = 0xA406 - GainControl = 0xA407 - Contrast = 0xA408 - Saturation = 0xA409 - Sharpness = 0xA40A - DeviceSettingDescription = 0xA40B - SubjectDistanceRange = 0xA40C - ImageUniqueID = 0xA420 - CameraOwnerName = 0xA430 - BodySerialNumber = 0xA431 - LensSpecification = 0xA432 - LensMake = 0xA433 - LensModel = 0xA434 - LensSerialNumber = 0xA435 - CompositeImage = 0xA460 - CompositeImageCount = 0xA461 - CompositeImageExposureTimes = 0xA462 - Gamma = 0xA500 - PrintImageMatching = 0xC4A5 - DNGVersion = 0xC612 - DNGBackwardVersion = 0xC613 - UniqueCameraModel = 0xC614 - LocalizedCameraModel = 0xC615 - CFAPlaneColor = 0xC616 - CFALayout = 0xC617 - LinearizationTable = 0xC618 - BlackLevelRepeatDim = 0xC619 - BlackLevel = 0xC61A - BlackLevelDeltaH = 0xC61B - BlackLevelDeltaV = 0xC61C - WhiteLevel = 0xC61D - DefaultScale = 0xC61E - DefaultCropOrigin = 0xC61F - DefaultCropSize = 0xC620 - ColorMatrix1 = 0xC621 - ColorMatrix2 = 0xC622 - CameraCalibration1 = 0xC623 - CameraCalibration2 = 0xC624 - ReductionMatrix1 = 0xC625 - ReductionMatrix2 = 0xC626 - AnalogBalance = 0xC627 - AsShotNeutral = 0xC628 - AsShotWhiteXY = 0xC629 - BaselineExposure = 0xC62A - BaselineNoise = 0xC62B - BaselineSharpness = 0xC62C - BayerGreenSplit = 0xC62D - LinearResponseLimit = 0xC62E - CameraSerialNumber = 0xC62F - LensInfo = 0xC630 - ChromaBlurRadius = 0xC631 - AntiAliasStrength = 0xC632 - ShadowScale = 0xC633 - DNGPrivateData = 0xC634 - MakerNoteSafety = 0xC635 - CalibrationIlluminant1 = 0xC65A - CalibrationIlluminant2 = 0xC65B - BestQualityScale = 0xC65C - RawDataUniqueID = 0xC65D - OriginalRawFileName = 0xC68B - OriginalRawFileData = 0xC68C - ActiveArea = 0xC68D - MaskedAreas = 0xC68E - AsShotICCProfile = 0xC68F - AsShotPreProfileMatrix = 0xC690 - CurrentICCProfile = 0xC691 - CurrentPreProfileMatrix = 0xC692 - ColorimetricReference = 0xC6BF - CameraCalibrationSignature = 0xC6F3 - ProfileCalibrationSignature = 0xC6F4 - AsShotProfileName = 0xC6F6 - NoiseReductionApplied = 0xC6F7 - ProfileName = 0xC6F8 - ProfileHueSatMapDims = 0xC6F9 - ProfileHueSatMapData1 = 0xC6FA - ProfileHueSatMapData2 = 0xC6FB - ProfileToneCurve = 0xC6FC - ProfileEmbedPolicy = 0xC6FD - ProfileCopyright = 0xC6FE - ForwardMatrix1 = 0xC714 - ForwardMatrix2 = 0xC715 - PreviewApplicationName = 0xC716 - PreviewApplicationVersion = 0xC717 - PreviewSettingsName = 0xC718 - PreviewSettingsDigest = 0xC719 - PreviewColorSpace = 0xC71A - PreviewDateTime = 0xC71B - RawImageDigest = 0xC71C - OriginalRawFileDigest = 0xC71D - SubTileBlockSize = 0xC71E - RowInterleaveFactor = 0xC71F - ProfileLookTableDims = 0xC725 - ProfileLookTableData = 0xC726 - OpcodeList1 = 0xC740 - OpcodeList2 = 0xC741 - OpcodeList3 = 0xC74E - NoiseProfile = 0xC761 - - 
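-# Illustrative sketch: the TAGS dict defined below is typically consumed together with
-# Pillow's Image.getexif(), which yields a mapping of numeric tag ids to values
-# ("photo.jpg" here is a hypothetical path):
-# from PIL import Image
-# exif = Image.open("photo.jpg").getexif()
-# for tag_id, value in exif.items():
-#     print(TAGS.get(tag_id, hex(tag_id)), value)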
-"""Maps EXIF tags to tag names.""" -TAGS = { - **{i.value: i.name for i in Base}, - 0x920C: "SpatialFrequencyResponse", - 0x9214: "SubjectLocation", - 0x9215: "ExposureIndex", - 0x828E: "CFAPattern", - 0x920B: "FlashEnergy", - 0x9216: "TIFF/EPStandardID", -} - - -class GPS(IntEnum): - GPSVersionID = 0 - GPSLatitudeRef = 1 - GPSLatitude = 2 - GPSLongitudeRef = 3 - GPSLongitude = 4 - GPSAltitudeRef = 5 - GPSAltitude = 6 - GPSTimeStamp = 7 - GPSSatellites = 8 - GPSStatus = 9 - GPSMeasureMode = 10 - GPSDOP = 11 - GPSSpeedRef = 12 - GPSSpeed = 13 - GPSTrackRef = 14 - GPSTrack = 15 - GPSImgDirectionRef = 16 - GPSImgDirection = 17 - GPSMapDatum = 18 - GPSDestLatitudeRef = 19 - GPSDestLatitude = 20 - GPSDestLongitudeRef = 21 - GPSDestLongitude = 22 - GPSDestBearingRef = 23 - GPSDestBearing = 24 - GPSDestDistanceRef = 25 - GPSDestDistance = 26 - GPSProcessingMethod = 27 - GPSAreaInformation = 28 - GPSDateStamp = 29 - GPSDifferential = 30 - GPSHPositioningError = 31 - - -"""Maps EXIF GPS tags to tag names.""" -GPSTAGS = {i.value: i.name for i in GPS} - - -class Interop(IntEnum): - InteropIndex = 1 - InteropVersion = 2 - RelatedImageFileFormat = 4096 - RelatedImageWidth = 4097 - RleatedImageHeight = 4098 - - -class IFD(IntEnum): - Exif = 34665 - GPSInfo = 34853 - Makernote = 37500 - Interop = 40965 - IFD1 = -1 - - -class LightSource(IntEnum): - Unknown = 0 - Daylight = 1 - Fluorescent = 2 - Tungsten = 3 - Flash = 4 - Fine = 9 - Cloudy = 10 - Shade = 11 - DaylightFluorescent = 12 - DayWhiteFluorescent = 13 - CoolWhiteFluorescent = 14 - WhiteFluorescent = 15 - StandardLightA = 17 - StandardLightB = 18 - StandardLightC = 19 - D55 = 20 - D65 = 21 - D75 = 22 - D50 = 23 - ISO = 24 - Other = 255 diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/otlLib/error.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/otlLib/error.py deleted file mode 100644 index 1cbef578341aa49e2ed62a35da7b53b9ce0ca25a..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/otlLib/error.py +++ /dev/null @@ -1,11 +0,0 @@ -class OpenTypeLibError(Exception): - def __init__(self, message, location): - Exception.__init__(self, message) - self.location = location - - def __str__(self): - message = Exception.__str__(self) - if self.location: - return f"{self.location}: {message}" - else: - return message diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/varLib/varStore.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/varLib/varStore.py deleted file mode 100644 index 74828e407ef5564f1623383201ed75e688a2eb96..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/varLib/varStore.py +++ /dev/null @@ -1,703 +0,0 @@ -from fontTools.misc.roundTools import noRound, otRound -from fontTools.misc.intTools import bit_count -from fontTools.ttLib.tables import otTables as ot -from fontTools.varLib.models import supportScalar -from fontTools.varLib.builder import ( - buildVarRegionList, - buildVarStore, - buildVarRegion, - buildVarData, -) -from functools import partial -from collections import defaultdict -from heapq import heappush, heappop - - -NO_VARIATION_INDEX = ot.NO_VARIATION_INDEX -ot.VarStore.NO_VARIATION_INDEX = NO_VARIATION_INDEX - - -def _getLocationKey(loc): - return tuple(sorted(loc.items(), key=lambda kv: kv[0])) - - -class OnlineVarStoreBuilder(object): 
- def __init__(self, axisTags): - self._axisTags = axisTags - self._regionMap = {} - self._regionList = buildVarRegionList([], axisTags) - self._store = buildVarStore(self._regionList, []) - self._data = None - self._model = None - self._supports = None - self._varDataIndices = {} - self._varDataCaches = {} - self._cache = {} - - def setModel(self, model): - self.setSupports(model.supports) - self._model = model - - def setSupports(self, supports): - self._model = None - self._supports = list(supports) - if not self._supports[0]: - del self._supports[0] # Drop base master support - self._cache = {} - self._data = None - - def finish(self, optimize=True): - self._regionList.RegionCount = len(self._regionList.Region) - self._store.VarDataCount = len(self._store.VarData) - for data in self._store.VarData: - data.ItemCount = len(data.Item) - data.calculateNumShorts(optimize=optimize) - return self._store - - def _add_VarData(self): - regionMap = self._regionMap - regionList = self._regionList - - regions = self._supports - regionIndices = [] - for region in regions: - key = _getLocationKey(region) - idx = regionMap.get(key) - if idx is None: - varRegion = buildVarRegion(region, self._axisTags) - idx = regionMap[key] = len(regionList.Region) - regionList.Region.append(varRegion) - regionIndices.append(idx) - - # Check if we have one already... - key = tuple(regionIndices) - varDataIdx = self._varDataIndices.get(key) - if varDataIdx is not None: - self._outer = varDataIdx - self._data = self._store.VarData[varDataIdx] - self._cache = self._varDataCaches[key] - if len(self._data.Item) == 0xFFFF: - # This is full. Need new one. - varDataIdx = None - - if varDataIdx is None: - self._data = buildVarData(regionIndices, [], optimize=False) - self._outer = len(self._store.VarData) - self._store.VarData.append(self._data) - self._varDataIndices[key] = self._outer - if key not in self._varDataCaches: - self._varDataCaches[key] = {} - self._cache = self._varDataCaches[key] - - def storeMasters(self, master_values, *, round=round): - deltas = self._model.getDeltas(master_values, round=round) - base = deltas.pop(0) - return base, self.storeDeltas(deltas, round=noRound) - - def storeDeltas(self, deltas, *, round=round): - deltas = [round(d) for d in deltas] - if len(deltas) == len(self._supports) + 1: - deltas = tuple(deltas[1:]) - else: - assert len(deltas) == len(self._supports) - deltas = tuple(deltas) - - varIdx = self._cache.get(deltas) - if varIdx is not None: - return varIdx - - if not self._data: - self._add_VarData() - inner = len(self._data.Item) - if inner == 0xFFFF: - # Full array. Start new one. 
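            # (a VarData can hold at most 0xFFFF rows because the minor half of a
            # variation index is only 16 bits; hence the (outer << 16) + inner packing below)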
- self._add_VarData() - return self.storeDeltas(deltas) - self._data.addItem(deltas, round=noRound) - - varIdx = (self._outer << 16) + inner - self._cache[deltas] = varIdx - return varIdx - - -def VarData_addItem(self, deltas, *, round=round): - deltas = [round(d) for d in deltas] - - countUs = self.VarRegionCount - countThem = len(deltas) - if countUs + 1 == countThem: - deltas = tuple(deltas[1:]) - else: - assert countUs == countThem, (countUs, countThem) - deltas = tuple(deltas) - self.Item.append(list(deltas)) - self.ItemCount = len(self.Item) - - -ot.VarData.addItem = VarData_addItem - - -def VarRegion_get_support(self, fvar_axes): - return { - fvar_axes[i].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord) - for i, reg in enumerate(self.VarRegionAxis) - if reg.PeakCoord != 0 - } - - -ot.VarRegion.get_support = VarRegion_get_support - - -def VarStore___bool__(self): - return bool(self.VarData) - - -ot.VarStore.__bool__ = VarStore___bool__ - - -class VarStoreInstancer(object): - def __init__(self, varstore, fvar_axes, location={}): - self.fvar_axes = fvar_axes - assert varstore is None or varstore.Format == 1 - self._varData = varstore.VarData if varstore else [] - self._regions = varstore.VarRegionList.Region if varstore else [] - self.setLocation(location) - - def setLocation(self, location): - self.location = dict(location) - self._clearCaches() - - def _clearCaches(self): - self._scalars = {} - - def _getScalar(self, regionIdx): - scalar = self._scalars.get(regionIdx) - if scalar is None: - support = self._regions[regionIdx].get_support(self.fvar_axes) - scalar = supportScalar(self.location, support) - self._scalars[regionIdx] = scalar - return scalar - - @staticmethod - def interpolateFromDeltasAndScalars(deltas, scalars): - delta = 0.0 - for d, s in zip(deltas, scalars): - if not s: - continue - delta += d * s - return delta - - def __getitem__(self, varidx): - major, minor = varidx >> 16, varidx & 0xFFFF - if varidx == NO_VARIATION_INDEX: - return 0.0 - varData = self._varData - scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex] - deltas = varData[major].Item[minor] - return self.interpolateFromDeltasAndScalars(deltas, scalars) - - def interpolateFromDeltas(self, varDataIndex, deltas): - varData = self._varData - scalars = [self._getScalar(ri) for ri in varData[varDataIndex].VarRegionIndex] - return self.interpolateFromDeltasAndScalars(deltas, scalars) - - -# -# Optimizations -# -# retainFirstMap - If true, major 0 mappings are retained. Deltas for unused indices are zeroed -# advIdxes - Set of major 0 indices for advance deltas to be listed first. Other major 0 indices follow. - - -def VarStore_subset_varidxes( - self, varIdxes, optimize=True, retainFirstMap=False, advIdxes=set() -): - # Sort out used varIdxes by major/minor. 
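    # (for example, varIdx 0x0003000A splits into major 3, the VarData index,
    # and minor 0x000A, the row within that VarData)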
- used = {} - for varIdx in varIdxes: - if varIdx == NO_VARIATION_INDEX: - continue - major = varIdx >> 16 - minor = varIdx & 0xFFFF - d = used.get(major) - if d is None: - d = used[major] = set() - d.add(minor) - del varIdxes - - # - # Subset VarData - # - - varData = self.VarData - newVarData = [] - varDataMap = {NO_VARIATION_INDEX: NO_VARIATION_INDEX} - for major, data in enumerate(varData): - usedMinors = used.get(major) - if usedMinors is None: - continue - newMajor = len(newVarData) - newVarData.append(data) - - items = data.Item - newItems = [] - if major == 0 and retainFirstMap: - for minor in range(len(items)): - newItems.append( - items[minor] if minor in usedMinors else [0] * len(items[minor]) - ) - varDataMap[minor] = minor - else: - if major == 0: - minors = sorted(advIdxes) + sorted(usedMinors - advIdxes) - else: - minors = sorted(usedMinors) - for minor in minors: - newMinor = len(newItems) - newItems.append(items[minor]) - varDataMap[(major << 16) + minor] = (newMajor << 16) + newMinor - - data.Item = newItems - data.ItemCount = len(data.Item) - - data.calculateNumShorts(optimize=optimize) - - self.VarData = newVarData - self.VarDataCount = len(self.VarData) - - self.prune_regions() - - return varDataMap - - -ot.VarStore.subset_varidxes = VarStore_subset_varidxes - - -def VarStore_prune_regions(self): - """Remove unused VarRegions.""" - # - # Subset VarRegionList - # - - # Collect. - usedRegions = set() - for data in self.VarData: - usedRegions.update(data.VarRegionIndex) - # Subset. - regionList = self.VarRegionList - regions = regionList.Region - newRegions = [] - regionMap = {} - for i in sorted(usedRegions): - regionMap[i] = len(newRegions) - newRegions.append(regions[i]) - regionList.Region = newRegions - regionList.RegionCount = len(regionList.Region) - # Map. - for data in self.VarData: - data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex] - - -ot.VarStore.prune_regions = VarStore_prune_regions - - -def _visit(self, func): - """Recurse down from self, if type of an object is ot.Device, - call func() on it. 
Works on otData-style classes.""" - - if type(self) == ot.Device: - func(self) - - elif isinstance(self, list): - for that in self: - _visit(that, func) - - elif hasattr(self, "getConverters") and not hasattr(self, "postRead"): - for conv in self.getConverters(): - that = getattr(self, conv.name, None) - if that is not None: - _visit(that, func) - - elif isinstance(self, ot.ValueRecord): - for that in self.__dict__.values(): - _visit(that, func) - - -def _Device_recordVarIdx(self, s): - """Add VarIdx in this Device table (if any) to the set s.""" - if self.DeltaFormat == 0x8000: - s.add((self.StartSize << 16) + self.EndSize) - - -def Object_collect_device_varidxes(self, varidxes): - adder = partial(_Device_recordVarIdx, s=varidxes) - _visit(self, adder) - - -ot.GDEF.collect_device_varidxes = Object_collect_device_varidxes -ot.GPOS.collect_device_varidxes = Object_collect_device_varidxes - - -def _Device_mapVarIdx(self, mapping, done): - """Map VarIdx in this Device table (if any) through mapping.""" - if id(self) in done: - return - done.add(id(self)) - if self.DeltaFormat == 0x8000: - varIdx = mapping[(self.StartSize << 16) + self.EndSize] - self.StartSize = varIdx >> 16 - self.EndSize = varIdx & 0xFFFF - - -def Object_remap_device_varidxes(self, varidxes_map): - mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set()) - _visit(self, mapper) - - -ot.GDEF.remap_device_varidxes = Object_remap_device_varidxes -ot.GPOS.remap_device_varidxes = Object_remap_device_varidxes - - -class _Encoding(object): - def __init__(self, chars): - self.chars = chars - self.width = bit_count(chars) - self.columns = self._columns(chars) - self.overhead = self._characteristic_overhead(self.columns) - self.items = set() - - def append(self, row): - self.items.add(row) - - def extend(self, lst): - self.items.update(lst) - - def get_room(self): - """Maximum number of bytes that can be added to characteristic - while still being beneficial to merge it into another one.""" - count = len(self.items) - return max(0, (self.overhead - 1) // count - self.width) - - room = property(get_room) - - def get_gain(self): - """Maximum possible byte gain from merging this into another - characteristic.""" - count = len(self.items) - return max(0, self.overhead - count) - - gain = property(get_gain) - - def gain_sort_key(self): - return self.gain, self.chars - - def width_sort_key(self): - return self.width, self.chars - - @staticmethod - def _characteristic_overhead(columns): - """Returns overhead in bytes of encoding this characteristic - as a VarData.""" - c = 4 + 6 # 4 bytes for LOffset, 6 bytes for VarData header - c += bit_count(columns) * 2 - return c - - @staticmethod - def _columns(chars): - cols = 0 - i = 1 - while chars: - if chars & 0b1111: - cols |= i - chars >>= 4 - i <<= 1 - return cols - - def gain_from_merging(self, other_encoding): - combined_chars = other_encoding.chars | self.chars - combined_width = bit_count(combined_chars) - combined_columns = self.columns | other_encoding.columns - combined_overhead = _Encoding._characteristic_overhead(combined_columns) - combined_gain = ( - +self.overhead - + other_encoding.overhead - - combined_overhead - - (combined_width - self.width) * len(self.items) - - (combined_width - other_encoding.width) * len(other_encoding.items) - ) - return combined_gain - - -class _EncodingDict(dict): - def __missing__(self, chars): - r = self[chars] = _Encoding(chars) - return r - - def add_row(self, row): - chars = self._row_characteristics(row) - self[chars].append(row) - - 
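    # (rows are bucketed by the characteristic bitmap computed below; rows that
    # can share one VarData encoding therefore land in the same _Encoding)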
@staticmethod - def _row_characteristics(row): - """Returns encoding characteristics for a row.""" - longWords = False - - chars = 0 - i = 1 - for v in row: - if v: - chars += i - if not (-128 <= v <= 127): - chars += i * 0b0010 - if not (-32768 <= v <= 32767): - longWords = True - break - i <<= 4 - - if longWords: - # Redo; only allow 2byte/4byte encoding - chars = 0 - i = 1 - for v in row: - if v: - chars += i * 0b0011 - if not (-32768 <= v <= 32767): - chars += i * 0b1100 - i <<= 4 - - return chars - - -def VarStore_optimize(self, use_NO_VARIATION_INDEX=True, quantization=1): - """Optimize storage. Returns mapping from old VarIdxes to new ones.""" - - # Overview: - # - # For each VarData row, we first extend it with zeroes to have - # one column per region in VarRegionList. We then group the - # rows into _Encoding objects, by their "characteristic" bitmap. - # The characteristic bitmap is a binary number representing how - # many bytes each column of the data takes up to encode. Each - # column is encoded in four bits. For example, if a column has - # only values in the range -128..127, it would only have a single - # bit set in the characteristic bitmap for that column. If it has - # values in the range -32768..32767, it would have two bits set. - # The number of ones in the characteristic bitmap is the "width" - # of the encoding. - # - # Each encoding as such has a number of "active" (ie. non-zero) - # columns. The overhead of encoding the characteristic bitmap - # is 10 bytes, plus 2 bytes per active column. - # - # When an encoding is merged into another one, if the characteristic - # of the old encoding is a subset of the new one, then the overhead - # of the old encoding is completely eliminated. However, each row - # now would require more bytes to encode, to the tune of one byte - # per characteristic bit that is active in the new encoding but not - # in the old one. The number of bits that can be added to an encoding - # while still beneficial to merge it into another encoding is called - # the "room" for that encoding. - # - # The "gain" of an encodings is the maximum number of bytes we can - # save by merging it into another encoding. The "gain" of merging - # two encodings is how many bytes we save by doing so. - # - # High-level algorithm: - # - # - Each encoding has a minimal way to encode it. However, because - # of the overhead of encoding the characteristic bitmap, it may - # be beneficial to merge two encodings together, if there is - # gain in doing so. As such, we need to search for the best - # such successive merges. - # - # Algorithm: - # - # - Put all encodings into a "todo" list. - # - # - Sort todo list by decreasing gain (for stability). - # - # - Make a priority-queue of the gain from combining each two - # encodings in the todo list. The priority queue is sorted by - # decreasing gain. Only positive gains are included. - # - # - While priority queue is not empty: - # - Pop the first item from the priority queue, - # - Merge the two encodings it represents, - # - Remove the two encodings from the todo list, - # - Insert positive gains from combining the new encoding with - # all existing todo list items into the priority queue, - # - If a todo list item with the same characteristic bitmap as - # the new encoding exists, remove it from the todo list and - # merge it into the new encoding. - # - Insert the new encoding into the todo list, - # - # - Encode all remaining items in the todo list. 
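    # A worked example of the characteristic bitmap (an illustration, not part
    # of the original module): the row (0, 5, -200, 40000) mixes a zero column,
    # a byte-sized value, a word-sized value and a value that overflows int16,
    # so the longWords pass encodes every nonzero column with 2 or 4 bytes:
    #
    #     >>> hex(_EncodingDict._row_characteristics((0, 5, -200, 40000)))
    #     '0xf330'    # nibbles, high to low: 4-byte, 2-byte, 2-byte, zero column
    #     >>> bit_count(0xf330)
    #     8           # the "width": each such row costs 8 bytes to encode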
- - # TODO - # Check that no two VarRegions are the same; if they are, fold them. - - n = len(self.VarRegionList.Region) # Number of columns - zeroes = [0] * n - - front_mapping = {} # Map from old VarIdxes to full row tuples - - encodings = _EncodingDict() - - # Collect all items into a set of full rows (with lots of zeroes.) - for major, data in enumerate(self.VarData): - regionIndices = data.VarRegionIndex - - for minor, item in enumerate(data.Item): - row = list(zeroes) - - if quantization == 1: - for regionIdx, v in zip(regionIndices, item): - row[regionIdx] += v - else: - for regionIdx, v in zip(regionIndices, item): - row[regionIdx] += ( - round(v / quantization) * quantization - ) # TODO https://github.com/fonttools/fonttools/pull/3126#discussion_r1205439785 - - row = tuple(row) - - if use_NO_VARIATION_INDEX and not any(row): - front_mapping[(major << 16) + minor] = None - continue - - encodings.add_row(row) - front_mapping[(major << 16) + minor] = row - - # Prepare for the main algorithm. - todo = sorted(encodings.values(), key=_Encoding.gain_sort_key) - del encodings - - # Repeatedly pick two best encodings to combine, and combine them. - - heap = [] - for i, encoding in enumerate(todo): - for j in range(i + 1, len(todo)): - other_encoding = todo[j] - combining_gain = encoding.gain_from_merging(other_encoding) - if combining_gain > 0: - heappush(heap, (-combining_gain, i, j)) - - while heap: - _, i, j = heappop(heap) - if todo[i] is None or todo[j] is None: - continue - - encoding, other_encoding = todo[i], todo[j] - todo[i], todo[j] = None, None - - # Combine the two encodings - combined_chars = other_encoding.chars | encoding.chars - combined_encoding = _Encoding(combined_chars) - combined_encoding.extend(encoding.items) - combined_encoding.extend(other_encoding.items) - - for k, enc in enumerate(todo): - if enc is None: - continue - - # In the unlikely event that the same encoding exists already, - # combine it. - if enc.chars == combined_chars: - combined_encoding.extend(enc.items) - todo[k] = None - continue - - combining_gain = combined_encoding.gain_from_merging(enc) - if combining_gain > 0: - heappush(heap, (-combining_gain, k, len(todo))) - - todo.append(combined_encoding) - - encodings = [encoding for encoding in todo if encoding is not None] - - # Assemble final store. - back_mapping = {} # Mapping from full rows to new VarIdxes - encodings.sort(key=_Encoding.width_sort_key) - self.VarData = [] - for major, encoding in enumerate(encodings): - data = ot.VarData() - self.VarData.append(data) - data.VarRegionIndex = range(n) - data.VarRegionCount = len(data.VarRegionIndex) - data.Item = sorted(encoding.items) - for minor, item in enumerate(data.Item): - back_mapping[item] = (major << 16) + minor - - # Compile final mapping. - varidx_map = {NO_VARIATION_INDEX: NO_VARIATION_INDEX} - for k, v in front_mapping.items(): - varidx_map[k] = back_mapping[v] if v is not None else NO_VARIATION_INDEX - - # Recalculate things and go home. - self.VarRegionList.RegionCount = len(self.VarRegionList.Region) - self.VarDataCount = len(self.VarData) - for data in self.VarData: - data.ItemCount = len(data.Item) - data.optimize() - - # Remove unused regions. 
-        self.prune_regions()
-
-        return varidx_map
-
-
-ot.VarStore.optimize = VarStore_optimize
-
-
-def main(args=None):
-    """Optimize a font's GDEF variation store"""
-    from argparse import ArgumentParser
-    from fontTools import configLogger
-    from fontTools.ttLib import TTFont
-    from fontTools.ttLib.tables.otBase import OTTableWriter
-
-    parser = ArgumentParser(prog="varLib.varStore", description=main.__doc__)
-    parser.add_argument("--quantization", type=int, default=1)
-    parser.add_argument("fontfile")
-    parser.add_argument("outfile", nargs="?")
-    options = parser.parse_args(args)
-
-    # TODO: allow user to configure logging via command-line options
-    configLogger(level="INFO")
-
-    quantization = options.quantization
-    fontfile = options.fontfile
-    outfile = options.outfile
-
-    font = TTFont(fontfile)
-    gdef = font["GDEF"]
-    store = gdef.table.VarStore
-
-    writer = OTTableWriter()
-    store.compile(writer, font)
-    size = len(writer.getAllData())
-    print("Before: %7d bytes" % size)
-
-    varidx_map = store.optimize(quantization=quantization)
-
-    writer = OTTableWriter()
-    store.compile(writer, font)
-    size = len(writer.getAllData())
-    print("After: %7d bytes" % size)
-
-    if outfile is not None:
-        gdef.table.remap_device_varidxes(varidx_map)
-        if "GPOS" in font:
-            font["GPOS"].table.remap_device_varidxes(varidx_map)
-
-        font.save(outfile)
-
-
-if __name__ == "__main__":
-    import sys
-
-    if len(sys.argv) > 1:
-        sys.exit(main())
-    import doctest
-
-    sys.exit(doctest.testmod().failed)
diff --git a/spaces/jonigata/PoseTweak/js/layeredCanvas.js b/spaces/jonigata/PoseTweak/js/layeredCanvas.js
deleted file mode 100644
index cf48e5b16a523b24f9c9ebbefa8d346fc926466e..0000000000000000000000000000000000000000
--- a/spaces/jonigata/PoseTweak/js/layeredCanvas.js
+++ /dev/null
@@ -1,128 +0,0 @@
-export class LayeredCanvas {
-    constructor(c) {
-        console.log("initializeLayeredCanvas");
-        this.canvas = c;
-        this.context = this.canvas.getContext('2d');
-
-        // Bind each handler once and keep the bound reference, so that
-        // cleanup() can unregister the very same function objects;
-        // calling .bind(this) again on removal would create new functions
-        // and never detach anything.
-        this.handleMouseDown = this.handleMouseDown.bind(this);
-        this.handleMouseMove = this.handleMouseMove.bind(this);
-        this.handleMouseUp = this.handleMouseUp.bind(this);
-        this.handleMouseLeave = this.handleMouseLeave.bind(this);
-
-        this.canvas.addEventListener('mousedown', this.handleMouseDown);
-        this.canvas.addEventListener('mousemove', this.handleMouseMove);
-        this.canvas.addEventListener('mouseup', this.handleMouseUp);
-        this.canvas.addEventListener('mouseleave', this.handleMouseLeave);
-
-        this.layers = [];
-    }
-
-    cleanup() {
-        this.canvas.removeEventListener('mousedown', this.handleMouseDown);
-        this.canvas.removeEventListener('mousemove', this.handleMouseMove);
-        this.canvas.removeEventListener('mouseup', this.handleMouseUp);
-        this.canvas.removeEventListener('mouseleave', this.handleMouseLeave);
-    }
-
-    getCanvasSize() {
-        return [this.canvas.width, this.canvas.height];
-    }
-
-    getCanvasPosition(event) {
-        const rect = this.canvas.getBoundingClientRect();
-        const x = Math.floor(event.clientX - rect.left);
-        const y = Math.floor(event.clientY - rect.top);
-        return [x, y];
-    }
-
-    handleMouseDown(event) {
-        const p = this.getCanvasPosition(event);
-
-        for (let i = this.layers.length - 1; i >= 0; i--) {
-            const layer = this.layers[i];
-            if (layer.accepts(p)) {
-                layer.mouseDown(p);
-                this.draggingLayer = layer;
-                this.dragStart = p;
-                break;
-            }
-        }
-    }
-
-    handleMouseMove(event) {
-        this.mouseCursor = this.getCanvasPosition(event);
-        if (this.draggingLayer) {
-            this.draggingLayer.mouseMove(this.getCanvasPosition(event)); // a fresh position object, just to be safe
-        }
-        this.render();
-    }
-
-    handleMouseUp(event) {
-        if (this.draggingLayer) {
-            this.draggingLayer.mouseUp(this.getCanvasPosition(event));
-            this.draggingLayer = null;
-        }
-    }
-
-    handleMouseLeave(event) {
-        this.mouseCursor = [-1,-1];
-        if (this.draggingLayer) {
-            this.handleMouseUp(event);
-        }
-    }
-
-    render() {
-        for (let i = 0; i < this.layers.length; i++) {
-            const layer = this.layers[i];
-            layer.render(this.canvas, this.context);
-        }
-    }
-
-    redraw() {
-        this.render();
-    }
-
-    addLayer(layer) {
-        this.layers.push(layer);
-    }
-
-}
-
-
-let mouseSequence = { // mixin
-    mouseDown(p) {
-        this.mouseHandler = this.mouse(p);
-    },
-    mouseMove(p) {
-        if (this.mouseHandler) {
-            this.mouseHandler.next(p);
-        }
-    },
-    mouseUp(p) {
-        if (this.mouseHandler) {
-            this.mouseHandler.next(null);
-            this.mouseHandler = null;
-        }
-    },
-/*
-    sample mouse handler
-    *mouse(p) {
-        while (p = yield) {
-            console.log("mouse", p);
-        }
-    }
-*/
-};
-
-export function sequentializeMouse(layerClass) {
-    layerClass.mouseDown = mouseSequence.mouseDown;
-    layerClass.mouseMove = mouseSequence.mouseMove;
-    layerClass.mouseUp = mouseSequence.mouseUp;
-}
-
-export class Layer {
-    constructor() {}
-
-    reserveRender() { this.renderReserved = true; } // set on the instance; a bare identifier would throw in module (strict) mode
-
-    accepts(point) { return false; }
-    mouseDown(point) {}
-    mouseMove(point) {}
-    mouseUp(point) {}
-    render(canvas, ctx) {}
-}
\ No newline at end of file
diff --git a/spaces/jw2yang/unicl-img-recog-demo/model/text_encoder/build.py b/spaces/jw2yang/unicl-img-recog-demo/model/text_encoder/build.py
deleted file mode 100644
index 21717b73146f2be5fa823e5bd8f4dd0b144d188c..0000000000000000000000000000000000000000
--- a/spaces/jw2yang/unicl-img-recog-demo/model/text_encoder/build.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import os
-
-from transformers import CLIPTokenizer
-from transformers import AutoTokenizer
-
-from .registry import lang_encoders
-from .registry import is_lang_encoder
-
-
-def build_lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
-    model_name = config_encoder['NAME']
-
-    if not is_lang_encoder(model_name):
-        raise ValueError(f'Unknown model: {model_name}')
-
-    return lang_encoders(model_name)(config_encoder, tokenizer, verbose, **kwargs)
-
-
-def build_tokenizer(config_encoder):
-    tokenizer = None
-    os.environ['TOKENIZERS_PARALLELISM'] = 'true'
-    if config_encoder['TOKENIZER'] == 'clip':
-        pretrained_tokenizer = config_encoder.get(
-            'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
-        )
-        tokenizer = CLIPTokenizer.from_pretrained(pretrained_tokenizer)
-        tokenizer.add_special_tokens({'cls_token': tokenizer.eos_token})
-    else:
-        tokenizer = AutoTokenizer.from_pretrained(config_encoder['TOKENIZER'])
-
-    return tokenizer
diff --git a/spaces/jyseo/3DFuse/ldm/modules/midas/midas/midas_net.py b/spaces/jyseo/3DFuse/ldm/modules/midas/midas/midas_net.py
deleted file mode 100644
index 8a954977800b0a0f48807e80fa63041910e33c1f..0000000000000000000000000000000000000000
--- a/spaces/jyseo/3DFuse/ldm/modules/midas/midas/midas_net.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""MidasNet: Network for monocular depth estimation trained by mixing several datasets.
-This file contains code that is adapted from
-https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
-"""
-import torch
-import torch.nn as nn
-
-from .base_model import BaseModel
-from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
-
-
-class MidasNet(BaseModel):
-    """Network for monocular depth estimation.
-    """
-
-    def __init__(self, path=None, features=256, non_negative=True):
-        """Init.
-
-        Args:
-            path (str, optional): Path to saved model. Defaults to None.
-            features (int, optional): Number of features.
Defaults to 256. - backbone (str, optional): Backbone network for encoder. Defaults to resnet50 - """ - print("Loading weights: ", path) - - super(MidasNet, self).__init__() - - use_pretrained = False if path is None else True - - self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained) - - self.scratch.refinenet4 = FeatureFusionBlock(features) - self.scratch.refinenet3 = FeatureFusionBlock(features) - self.scratch.refinenet2 = FeatureFusionBlock(features) - self.scratch.refinenet1 = FeatureFusionBlock(features) - - self.scratch.output_conv = nn.Sequential( - nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), - Interpolate(scale_factor=2, mode="bilinear"), - nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), - nn.ReLU(True), - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - nn.ReLU(True) if non_negative else nn.Identity(), - ) - - if path: - self.load(path) - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input data (image) - - Returns: - tensor: depth - """ - - layer_1 = self.pretrained.layer1(x) - layer_2 = self.pretrained.layer2(layer_1) - layer_3 = self.pretrained.layer3(layer_2) - layer_4 = self.pretrained.layer4(layer_3) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return torch.squeeze(out, dim=1) diff --git a/spaces/ka1kuk/fastapi/Dockerfile b/spaces/ka1kuk/fastapi/Dockerfile deleted file mode 100644 index 604194d0db1b589fb1ddfccc0475e45262b9feb7..0000000000000000000000000000000000000000 --- a/spaces/ka1kuk/fastapi/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM python:3.9 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -COPY . . - -RUN pip install --no-cache-dir --upgrade -r requirements.txt - -RUN useradd -m -u 1000 user -USER user - -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -WORKDIR $HOME/app - -COPY --chown=user . 
$HOME/app - -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] diff --git a/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/modules.py b/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/modules.py deleted file mode 100644 index c83289df7c79a4810dacd15c050148544ba0b6a9..0000000000000000000000000000000000000000 --- a/spaces/kainy/rvc_okiba_TTS/lib/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from lib.infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = 
self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( 
- Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) 
* x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/kevintang513/watch-watcher/README.md b/spaces/kevintang513/watch-watcher/README.md deleted file mode 100644 index e549e04fda2cb41086c3049b75c0e34b65209bbd..0000000000000000000000000000000000000000 --- a/spaces/kevintang513/watch-watcher/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Watch Watcher -emoji: 🏢 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.1.7 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/backbones/iresnet2060.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/backbones/iresnet2060.py deleted file mode 100644 index 21d1122144d207637d2444cba1f68fe630c89f31..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/backbones/iresnet2060.py +++ /dev/null @@ -1,176 +0,0 @@ -import torch -from torch import nn - -assert torch.__version__ >= "1.8.1" -from torch.utils.checkpoint import checkpoint_sequential - -__all__ = ['iresnet2060'] - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - groups=groups, - bias=False, - dilation=dilation) - - -def 
conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, - out_planes, - kernel_size=1, - stride=stride, - bias=False) - - -class IBasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, - groups=1, base_width=64, dilation=1): - super(IBasicBlock, self).__init__() - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05, ) - self.conv1 = conv3x3(inplanes, planes) - self.bn2 = nn.BatchNorm2d(planes, eps=1e-05, ) - self.prelu = nn.PReLU(planes) - self.conv2 = conv3x3(planes, planes, stride) - self.bn3 = nn.BatchNorm2d(planes, eps=1e-05, ) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - out = self.bn1(x) - out = self.conv1(out) - out = self.bn2(out) - out = self.prelu(out) - out = self.conv2(out) - out = self.bn3(out) - if self.downsample is not None: - identity = self.downsample(x) - out += identity - return out - - -class IResNet(nn.Module): - fc_scale = 7 * 7 - - def __init__(self, - block, layers, dropout=0, num_features=512, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False): - super(IResNet, self).__init__() - self.fp16 = fp16 - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05) - self.prelu = nn.PReLU(self.inplanes) - self.layer1 = self._make_layer(block, 64, layers[0], stride=2) - self.layer2 = self._make_layer(block, - 128, - layers[1], - stride=2, - dilate=replace_stride_with_dilation[0]) - self.layer3 = self._make_layer(block, - 256, - layers[2], - stride=2, - dilate=replace_stride_with_dilation[1]) - self.layer4 = self._make_layer(block, - 512, - layers[3], - stride=2, - dilate=replace_stride_with_dilation[2]) - self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05, ) - self.dropout = nn.Dropout(p=dropout, inplace=True) - self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features) - self.features = nn.BatchNorm1d(num_features, eps=1e-05) - nn.init.constant_(self.features.weight, 1.0) - self.features.weight.requires_grad = False - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.normal_(m.weight, 0, 0.1) - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - if zero_init_residual: - for m in self.modules(): - if isinstance(m, IBasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False): - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ), - ) - layers = [] - layers.append( - block(self.inplanes, planes, stride, 
downsample, self.groups, - self.base_width, previous_dilation)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(self.inplanes, - planes, - groups=self.groups, - base_width=self.base_width, - dilation=self.dilation)) - - return nn.Sequential(*layers) - - def checkpoint(self, func, num_seg, x): - if self.training: - return checkpoint_sequential(func, num_seg, x) - else: - return func(x) - - def forward(self, x): - with torch.cuda.amp.autocast(self.fp16): - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu(x) - x = self.layer1(x) - x = self.checkpoint(self.layer2, 20, x) - x = self.checkpoint(self.layer3, 100, x) - x = self.layer4(x) - x = self.bn2(x) - x = torch.flatten(x, 1) - x = self.dropout(x) - x = self.fc(x.float() if self.fp16 else x) - x = self.features(x) - return x - - -def _iresnet(arch, block, layers, pretrained, progress, **kwargs): - model = IResNet(block, layers, **kwargs) - if pretrained: - raise ValueError() - return model - - -def iresnet2060(pretrained=False, progress=True, **kwargs): - return _iresnet('iresnet2060', IBasicBlock, [3, 128, 1024 - 128, 3], pretrained, progress, **kwargs) diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/utils/preprocess.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/utils/preprocess.py deleted file mode 100644 index 0f784e6c3d8562e1db1bbd850b9f01843cee3c97..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/utils/preprocess.py +++ /dev/null @@ -1,170 +0,0 @@ -import numpy as np -import cv2, os, sys, torch -from tqdm import tqdm -from PIL import Image - -# 3dmm extraction -import safetensors -import safetensors.torch -from src.face3d.util.preprocess import align_img -from src.face3d.util.load_mats import load_lm3d -from src.face3d.models import networks - -from scipy.io import loadmat, savemat -from src.utils.croper import Preprocesser - - -import warnings - -from src.utils.safetensor_helper import load_x_from_safetensor -warnings.filterwarnings("ignore") - -def split_coeff(coeffs): - """ - Return: - coeffs_dict -- a dict of torch.tensors - - Parameters: - coeffs -- torch.tensor, size (B, 256) - """ - id_coeffs = coeffs[:, :80] - exp_coeffs = coeffs[:, 80: 144] - tex_coeffs = coeffs[:, 144: 224] - angles = coeffs[:, 224: 227] - gammas = coeffs[:, 227: 254] - translations = coeffs[:, 254:] - return { - 'id': id_coeffs, - 'exp': exp_coeffs, - 'tex': tex_coeffs, - 'angle': angles, - 'gamma': gammas, - 'trans': translations - } - - -class CropAndExtract(): - def __init__(self, sadtalker_path, device): - - self.propress = Preprocesser(device) - self.net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='').to(device) - - if sadtalker_path['use_safetensor']: - checkpoint = safetensors.torch.load_file(sadtalker_path['checkpoint']) - self.net_recon.load_state_dict(load_x_from_safetensor(checkpoint, 'face_3drecon')) - else: - checkpoint = torch.load(sadtalker_path['path_of_net_recon_model'], map_location=torch.device(device)) - self.net_recon.load_state_dict(checkpoint['net_recon']) - - self.net_recon.eval() - self.lm3d_std = load_lm3d(sadtalker_path['dir_of_BFM_fitting']) - self.device = device - - def generate(self, input_path, save_dir, crop_or_resize='crop', source_image_flag=False, pic_size=256): - - pic_name = os.path.splitext(os.path.split(input_path)[-1])[0] - - landmarks_path = os.path.join(save_dir, pic_name+'_landmarks.txt') - coeff_path = os.path.join(save_dir, pic_name+'.mat') - png_path = 
os.path.join(save_dir, pic_name+'.png')
-
-        # load input
-        if not os.path.isfile(input_path):
-            raise ValueError('input_path must be a valid path to video/image file')
-        elif input_path.split('.')[-1] in ['jpg', 'png', 'jpeg']:
-            # loader for first frame
-            full_frames = [cv2.imread(input_path)]
-            fps = 25
-        else:
-            # loader for videos
-            video_stream = cv2.VideoCapture(input_path)
-            fps = video_stream.get(cv2.CAP_PROP_FPS)
-            full_frames = []
-            while True:
-                still_reading, frame = video_stream.read()
-                if not still_reading:
-                    video_stream.release()
-                    break
-                full_frames.append(frame)
-                if source_image_flag:
-                    break
-
-        x_full_frames = [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in full_frames]
-
-        #### crop images according to the requested preprocess mode
-        if 'crop' in crop_or_resize.lower(): # default crop
-            x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512)
-            clx, cly, crx, cry = crop
-            lx, ly, rx, ry = quad
-            lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
-            oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx
-            crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad)
-        elif 'full' in crop_or_resize.lower():
-            x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512)
-            clx, cly, crx, cry = crop
-            lx, ly, rx, ry = quad
-            lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)
-            oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx
-            crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad)
-        else: # resize mode
-            oy1, oy2, ox1, ox2 = 0, x_full_frames[0].shape[0], 0, x_full_frames[0].shape[1]
-            crop_info = ((ox2 - ox1, oy2 - oy1), None, None)
-
-        frames_pil = [Image.fromarray(cv2.resize(frame, (pic_size, pic_size))) for frame in x_full_frames]
-        if len(frames_pil) == 0:
-            print('No face is detected in the input file')
-            return None, None
-
-        # save the preprocessed reference frame (each iteration overwrites
-        # png_path, so only the last frame is kept)
-        for frame in frames_pil:
-            cv2.imwrite(png_path, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))
-
-        # 2. get the landmark according to the detected face.
-        if not os.path.isfile(landmarks_path):
-            lm = self.propress.predictor.extract_keypoint(frames_pil, landmarks_path)
-        else:
-            print(' Using saved landmarks.')
-            lm = np.loadtxt(landmarks_path).astype(np.float32)
-            lm = lm.reshape([len(x_full_frames), -1, 2])
-
-        if not os.path.isfile(coeff_path):
-            # load 3dmm parameter generator from Deep3DFaceRecon_pytorch
-            video_coeffs, full_coeffs = [], []
-            for idx in tqdm(range(len(frames_pil)), desc='3DMM Extraction In Video:'):
-                frame = frames_pil[idx]
-                W, H = frame.size
-                lm1 = lm[idx].reshape([-1, 2])
-
-                if np.mean(lm1) == -1:
-                    lm1 = (self.lm3d_std[:, :2]+1)/2.
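                    # (landmark extraction returned -1 for this frame: fall back to the
                    # template's mean landmarks, mapped from [-1, 1] to [0, 1] here and
                    # to pixel coordinates just below)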
- lm1 = np.concatenate( - [lm1[:, :1]*W, lm1[:, 1:2]*H], 1 - ) - else: - lm1[:, -1] = H - 1 - lm1[:, -1] - - trans_params, im1, lm1, _ = align_img(frame, lm1, self.lm3d_std) - - trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]).astype(np.float32) - im_t = torch.tensor(np.array(im1)/255., dtype=torch.float32).permute(2, 0, 1).to(self.device).unsqueeze(0) - - with torch.no_grad(): - full_coeff = self.net_recon(im_t) - coeffs = split_coeff(full_coeff) - - pred_coeff = {key:coeffs[key].cpu().numpy() for key in coeffs} - - pred_coeff = np.concatenate([ - pred_coeff['exp'], - pred_coeff['angle'], - pred_coeff['trans'], - trans_params[2:][None], - ], 1) - video_coeffs.append(pred_coeff) - full_coeffs.append(full_coeff.cpu().numpy()) - - semantic_npy = np.array(video_coeffs)[:,0] - - savemat(coeff_path, {'coeff_3dmm': semantic_npy, 'full_3dmm': np.array(full_coeffs)[0]}) - - return coeff_path, png_path, crop_info diff --git a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/utils/videoio.py b/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/utils/videoio.py deleted file mode 100644 index d16ee667713a16e3f9644fcc3cb3e023bc2c9102..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-VC-SadTalker/src/utils/videoio.py +++ /dev/null @@ -1,41 +0,0 @@ -import shutil -import uuid - -import os - -import cv2 - -def load_video_to_cv2(input_path): - video_stream = cv2.VideoCapture(input_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - full_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - full_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) - return full_frames - -def save_video_with_watermark(video, audio, save_path, watermark=False): - temp_file = str(uuid.uuid4())+'.mp4' - cmd = r'ffmpeg -y -hide_banner -loglevel error -i "%s" -i "%s" -vcodec mpeg4 "%s"' % (video, audio, temp_file) - os.system(cmd) - - if watermark is False: - shutil.move(temp_file, save_path) - else: - # watermark - try: - ##### check if stable-diffusion-webui - import webui - from modules import paths - watarmark_path = paths.script_path+"/extensions/SadTalker/docs/sadtalker_logo.png" - except: - # get the root path of sadtalker. 
- dir_path = os.path.dirname(os.path.realpath(__file__)) - watarmark_path = dir_path+"/../../docs/sadtalker_logo.png" - - cmd = r'ffmpeg -y -hide_banner -loglevel error -i "%s" -i "%s" -filter_complex "[1]scale=100:-1[wm];[0][wm]overlay=(main_w-overlay_w)-10:10" "%s"' % (temp_file, watarmark_path, save_path) - os.system(cmd) - os.remove(temp_file) \ No newline at end of file diff --git a/spaces/kevinwang676/M4Singer/modules/parallel_wavegan/stft_loss.py b/spaces/kevinwang676/M4Singer/modules/parallel_wavegan/stft_loss.py deleted file mode 100644 index 229e6c777dc9ec7f710842d1e648dba1189ec8b4..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/M4Singer/modules/parallel_wavegan/stft_loss.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""STFT-based Loss modules.""" -import librosa -import torch - -from modules.parallel_wavegan.losses import LogSTFTMagnitudeLoss, SpectralConvergengeLoss, stft - - -class STFTLoss(torch.nn.Module): - """STFT loss module.""" - - def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", - use_mel_loss=False): - """Initialize STFT loss module.""" - super(STFTLoss, self).__init__() - self.fft_size = fft_size - self.shift_size = shift_size - self.win_length = win_length - self.window = getattr(torch, window)(win_length) - self.spectral_convergenge_loss = SpectralConvergengeLoss() - self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() - self.use_mel_loss = use_mel_loss - self.mel_basis = None - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Spectral convergence loss value. - Tensor: Log STFT magnitude loss value. - - """ - x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) - y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) - if self.use_mel_loss: - if self.mel_basis is None: - self.mel_basis = torch.from_numpy(librosa.filters.mel(22050, self.fft_size, 80)).cuda().T - x_mag = x_mag @ self.mel_basis - y_mag = y_mag @ self.mel_basis - - sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) - mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) - - return sc_loss, mag_loss - - -class MultiResolutionSTFTLoss(torch.nn.Module): - """Multi resolution STFT loss module.""" - - def __init__(self, - fft_sizes=[1024, 2048, 512], - hop_sizes=[120, 240, 50], - win_lengths=[600, 1200, 240], - window="hann_window", - use_mel_loss=False): - """Initialize Multi resolution STFT loss module. - - Args: - fft_sizes (list): List of FFT sizes. - hop_sizes (list): List of hop sizes. - win_lengths (list): List of window lengths. - window (str): Window function type. - - """ - super(MultiResolutionSTFTLoss, self).__init__() - assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) - self.stft_losses = torch.nn.ModuleList() - for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): - self.stft_losses += [STFTLoss(fs, ss, wl, window, use_mel_loss)] - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Multi resolution spectral convergence loss value. - Tensor: Multi resolution log STFT magnitude loss value. 
- - """ - sc_loss = 0.0 - mag_loss = 0.0 - for f in self.stft_losses: - sc_l, mag_l = f(x, y) - sc_loss += sc_l - mag_loss += mag_l - sc_loss /= len(self.stft_losses) - mag_loss /= len(self.stft_losses) - - return sc_loss, mag_loss diff --git a/spaces/kevinwang676/VoiceChanger/launcher.py b/spaces/kevinwang676/VoiceChanger/launcher.py deleted file mode 100644 index 17ce9f1a18c3d563333bbb0eacc2922fb8524e3f..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChanger/launcher.py +++ /dev/null @@ -1,204 +0,0 @@ -# this scripts installs necessary requirements and launches main program in webui.py -# borrow from : https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/launch.py -import subprocess -import os -import sys -import importlib.util -import shlex -import platform -import json - -python = sys.executable -git = os.environ.get('GIT', "git") -index_url = os.environ.get('INDEX_URL', "") -stored_commit_hash = None -skip_install = False -dir_repos = "repositories" -script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - -if 'GRADIO_ANALYTICS_ENABLED' not in os.environ: - os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False' - - -def check_python_version(): - is_windows = platform.system() == "Windows" - major = sys.version_info.major - minor = sys.version_info.minor - micro = sys.version_info.micro - - if is_windows: - supported_minors = [10] - else: - supported_minors = [7, 8, 9, 10, 11] - - if not (major == 3 and minor in supported_minors): - - raise (f""" -INCOMPATIBLE PYTHON VERSION -This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}. -If you encounter an error with "RuntimeError: Couldn't install torch." message, -or any other error regarding unsuccessful package (library) installation, -please downgrade (or upgrade) to the latest version of 3.10 Python -and delete current Python and "venv" folder in WebUI's directory. -You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3109/ -{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""} -Use --skip-python-version-check to suppress this warning. -""") - - -def commit_hash(): - global stored_commit_hash - - if stored_commit_hash is not None: - return stored_commit_hash - - try: - stored_commit_hash = run(f"{git} rev-parse HEAD").strip() - except Exception: - stored_commit_hash = "" - - return stored_commit_hash - - -def run(command, desc=None, errdesc=None, custom_env=None, live=False): - if desc is not None: - print(desc) - - if live: - result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - raise RuntimeError(f"""{errdesc or 'Error running command'}. -Command: {command} -Error code: {result.returncode}""") - - return "" - - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) - - if result.returncode != 0: - - message = f"""{errdesc or 'Error running command'}. 
-Command: {command} -Error code: {result.returncode} -stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''} -stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''} -""" - raise RuntimeError(message) - - return result.stdout.decode(encoding="utf8", errors="ignore") - - -def check_run(command): - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - return result.returncode == 0 - - -def is_installed(package): - try: - spec = importlib.util.find_spec(package) - except ModuleNotFoundError: - return False - - return spec is not None - - -def repo_dir(name): - return os.path.join(script_path, dir_repos, name) - - -def run_python(code, desc=None, errdesc=None): - return run(f'"{python}" -c "{code}"', desc, errdesc) - - -def run_pip(args, desc=None): - if skip_install: - return - - index_url_line = f' --index-url {index_url}' if index_url != '' else '' - return run(f'"{python}" -m pip {args} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}") - - -def check_run_python(code): - return check_run(f'"{python}" -c "{code}"') - - -def git_clone(url, dir, name, commithash=None): - # TODO clone into temporary dir and move if successful - - if os.path.exists(dir): - if commithash is None: - return - - current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip() - if current_hash == commithash: - return - - run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") - run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") - return - - run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") - - if commithash is not None: - run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") - - -def git_pull_recursive(dir): - for subdir, _, _ in os.walk(dir): - if os.path.exists(os.path.join(subdir, '.git')): - try: - output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash']) - print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n") - except subprocess.CalledProcessError as e: - print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n") - - -def run_extension_installer(extension_dir): - path_installer = os.path.join(extension_dir, "install.py") - if not os.path.isfile(path_installer): - return - - try: - env = os.environ.copy() - env['PYTHONPATH'] = os.path.abspath(".") - - print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env)) - except Exception as e: - print(e, file=sys.stderr) - - -def prepare_environment(): - global skip_install - - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113") - - ## check windows - if sys.platform != 'win32': - requirements_file = os.environ.get('REQS_FILE', "req.txt") - else: - requirements_file = os.environ.get('REQS_FILE', "requirements.txt") - - commit = commit_hash() - - print(f"Python {sys.version}") - print(f"Commit hash: {commit}") - - if not is_installed("torch") or not is_installed("torchvision"): - run(f'"{python}" -m 
{torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True) - - run_pip(f"install -r \"{requirements_file}\"", "requirements for SadTalker WebUI (may take longer time in first time)") - - if sys.platform != 'win32' and not is_installed('tts'): - run_pip(f"install TTS", "install TTS individually in SadTalker, which might not work on windows.") - - -def start(): - print(f"Launching SadTalker Web UI") - from app_sadtalker import sadtalker_demo - demo = sadtalker_demo() - demo.queue() - demo.launch() - -if __name__ == "__main__": - prepare_environment() - start() \ No newline at end of file diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/roberta/README.glue.md b/spaces/koajoel/PolyFormer/fairseq/examples/roberta/README.glue.md deleted file mode 100644 index 4f596d55af99fba3cdf58b1d5ff3d8f8dbf4383d..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/roberta/README.glue.md +++ /dev/null @@ -1,64 +0,0 @@ -# Finetuning RoBERTa on GLUE tasks - -### 1) Download the data from GLUE website (https://gluebenchmark.com/tasks) using following commands: -```bash -wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py -python download_glue_data.py --data_dir glue_data --tasks all -``` - -### 2) Preprocess GLUE task data: -```bash -./examples/roberta/preprocess_GLUE_tasks.sh glue_data -``` -`glue_task_name` is one of the following: -`{ALL, QQP, MNLI, QNLI, MRPC, RTE, STS-B, SST-2, CoLA}` -Use `ALL` for preprocessing all the glue tasks. - -### 3) Fine-tuning on GLUE task: -Example fine-tuning cmd for `RTE` task -```bash -ROBERTA_PATH=/path/to/roberta/model.pt - -CUDA_VISIBLE_DEVICES=0 fairseq-hydra-train -config-dir examples/roberta/config/finetuning --config-name rte \ -task.data=RTE-bin checkpoint.restore_file=$ROBERTA_PATH -``` - -There are additional config files for each of the GLUE tasks in the examples/roberta/config/finetuning directory. - -**Note:** - -a) Above cmd-args and hyperparams are tested on one Nvidia `V100` GPU with `32gb` of memory for each task. Depending on the GPU memory resources available to you, you can use increase `--update-freq` and reduce `--batch-size`. - -b) All the settings in above table are suggested settings based on our hyperparam search within a fixed search space (for careful comparison across models). You might be able to find better metrics with wider hyperparam search. 
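As a concrete illustration of note (a) — a sketch rather than part of the original recipe; the override keys `dataset.batch_size` and `optimization.update_freq` follow fairseq's hydra config groups, and the values below are placeholders to adapt to your GPU — you can halve the per-GPU batch size and double the gradient-accumulation factor:

```bash
ROBERTA_PATH=/path/to/roberta/model.pt

# Half the per-GPU batch, twice the accumulation: the effective batch size
# (batch_size x update_freq) that the hyperparameters were tuned for is unchanged.
CUDA_VISIBLE_DEVICES=0 fairseq-hydra-train -config-dir examples/roberta/config/finetuning --config-name rte \
task.data=RTE-bin checkpoint.restore_file=$ROBERTA_PATH \
dataset.batch_size=8 optimization.update_freq='[2]'
```

Keeping `batch_size × update_freq` constant preserves the effective batch size, so the suggested learning rates remain applicable.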
- -### Inference on GLUE task -After training the model as mentioned in previous step, you can perform inference with checkpoints in `checkpoints/` directory using following python code snippet: - -```python -from fairseq.models.roberta import RobertaModel - -roberta = RobertaModel.from_pretrained( - 'checkpoints/', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='RTE-bin' -) - -label_fn = lambda label: roberta.task.label_dictionary.string( - [label + roberta.task.label_dictionary.nspecial] -) -ncorrect, nsamples = 0, 0 -roberta.cuda() -roberta.eval() -with open('glue_data/RTE/dev.tsv') as fin: - fin.readline() - for index, line in enumerate(fin): - tokens = line.strip().split('\t') - sent1, sent2, target = tokens[1], tokens[2], tokens[3] - tokens = roberta.encode(sent1, sent2) - prediction = roberta.predict('sentence_classification_head', tokens).argmax().item() - prediction_label = label_fn(prediction) - ncorrect += int(prediction_label == target) - nsamples += 1 -print('| Accuracy: ', float(ncorrect)/float(nsamples)) - -``` diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/modules/squeeze_excitation.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/modules/squeeze_excitation.py deleted file mode 100644 index d1d902bb30c071acbc0fa919a134c80fed86bd6c..0000000000000000000000000000000000000000 --- a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/modules/squeeze_excitation.py +++ /dev/null @@ -1,20 +0,0 @@ -import torch.nn as nn - - -class SELayer(nn.Module): - def __init__(self, channel, reduction=16): - super(SELayer, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction, bias=False), - nn.ReLU(inplace=True), - nn.Linear(channel // reduction, channel, bias=False), - nn.Sigmoid() - ) - - def forward(self, x): - b, c, _, _ = x.size() - y = self.avg_pool(x).view(b, c) - y = self.fc(y).view(b, c, 1, 1) - res = x * y.expand_as(x) - return res diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_inline/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_inline/__init__.py deleted file mode 100644 index f27907ce5133c345f686143c155f3e8523ab46d5..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/markdown_it/rules_inline/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -__all__ = ( - "StateInline", - "text", - "text_collapse", - "link_pairs", - "escape", - "newline", - "backtick", - "emphasis", - "image", - "link", - "autolink", - "entity", - "html_inline", - "strikethrough", -) -from . 
import emphasis, strikethrough -from .autolink import autolink -from .backticks import backtick -from .balance_pairs import link_pairs -from .entity import entity -from .escape import escape -from .html_inline import html_inline -from .image import image -from .link import link -from .newline import newline -from .state_inline import StateInline -from .text import text -from .text_collapse import text_collapse diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_srmd.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_srmd.py deleted file mode 100644 index 4c414b236ac5986ff9ee3aea651d8ea433047ece..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/models/network_srmd.py +++ /dev/null @@ -1,81 +0,0 @@ - -import torch.nn as nn -import models.basicblock as B -import torch - -""" -# -------------------------------------------- -# SRMD (15 conv layers) -# -------------------------------------------- -Reference: -@inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} -} -http://openaccess.thecvf.com/content_cvpr_2018/papers/Zhang_Learning_a_Single_CVPR_2018_paper.pdf -""" - - -# -------------------------------------------- -# SRMD (SRMD, in_nc = 3+15+1 = 19) -# SRMD (SRMDNF, in_nc = 3+15 = 18) -# -------------------------------------------- -class SRMD(nn.Module): - def __init__(self, in_nc=19, out_nc=3, nc=128, nb=12, upscale=4, act_mode='R', upsample_mode='pixelshuffle'): - """ - # ------------------------------------ - in_nc: channel number of input, default: 3+15 - out_nc: channel number of output - nc: channel number - nb: total number of conv layers - upscale: scale factor - act_mode: batch norm + activation function; 'BR' means BN+ReLU - upsample_mode: default 'pixelshuffle' = conv + pixelshuffle - # ------------------------------------ - """ - super(SRMD, self).__init__() - assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL' - bias = True - - if upsample_mode == 'upconv': - upsample_block = B.upsample_upconv - elif upsample_mode == 'pixelshuffle': - upsample_block = B.upsample_pixelshuffle - elif upsample_mode == 'convtranspose': - upsample_block = B.upsample_convtranspose - else: - raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode)) - - m_head = B.conv(in_nc, nc, mode='C'+act_mode[-1], bias=bias) - m_body = [B.conv(nc, nc, mode='C'+act_mode, bias=bias) for _ in range(nb-2)] - m_tail = upsample_block(nc, out_nc, mode=str(upscale), bias=bias) - - self.model = B.sequential(m_head, *m_body, m_tail) - -# def forward(self, x, k_pca): -# m = k_pca.repeat(1, 1, x.size()[-2], x.size()[-1]) -# x = torch.cat((x, m), 1) -# x = self.body(x) - - def forward(self, x): - - x = self.model(x) - - return x - - -if __name__ == '__main__': - from utils import utils_model - model = SRMD(in_nc=18, out_nc=3, nc=64, nb=15, upscale=4, act_mode='R', upsample_mode='pixelshuffle') - print(utils_model.describe_model(model)) - - x = torch.randn((2, 3, 100, 100)) - k_pca = torch.randn(2, 15, 1, 1) - x = model(x, k_pca) - print(x.shape) - - # run models/network_srmd.py - diff --git a/spaces/leogabraneth/text-generation-webui-main/extensions/elevenlabs_tts/script.py b/spaces/leogabraneth/text-generation-webui-main/extensions/elevenlabs_tts/script.py 
deleted file mode 100644 index af0c74582e5be9d31effff28328b5b848554bc05..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/extensions/elevenlabs_tts/script.py +++ /dev/null @@ -1,197 +0,0 @@ -import html -import re -from pathlib import Path - -import elevenlabs -import gradio as gr - -from modules import chat, shared, ui_chat -from modules.logging_colors import logger -from modules.utils import gradio - -params = { - 'activate': True, - 'api_key': None, - 'selected_voice': 'None', - 'autoplay': False, - 'show_text': True, - 'model': 'eleven_monolingual_v1', -} - -voices = None -wav_idx = 0 -LANG_MODELS = ['eleven_monolingual_v1', 'eleven_multilingual_v1'] - - -def update_api_key(key): - params['api_key'] = key - if key is not None: - elevenlabs.set_api_key(key) - - -def refresh_voices(): - global params - your_voices = elevenlabs.voices() - voice_names = [voice.name for voice in your_voices] - return voice_names - - -def refresh_voices_dd(): - all_voices = refresh_voices() - return gr.Dropdown.update(value=all_voices[0], choices=all_voices) - - -def remove_tts_from_history(history): - for i, entry in enumerate(history['internal']): - history['visible'][i] = [history['visible'][i][0], entry[1]] - - return history - - -def toggle_text_in_history(history): - for i, entry in enumerate(history['visible']): - visible_reply = entry[1] - if visible_reply.startswith('')[0]}\n\n{reply}"] - else: - history['visible'][i] = [history['visible'][i][0], f"{visible_reply.split('')[0]}"] - - return history - - -def remove_surrounded_chars(string): - # this expression matches to 'as few symbols as possible (0 upwards) between any asterisks' OR - # 'as few symbols as possible (0 upwards) between an asterisk and the end of the string' - return re.sub('\*[^\*]*?(\*|$)', '', string) - - -def state_modifier(state): - if not params['activate']: - return state - - state['stream'] = False - return state - - -def input_modifier(string): - if not params['activate']: - return string - - shared.processing_message = "*Is recording a voice message...*" - return string - - -def history_modifier(history): - # Remove autoplay from the last reply - if len(history['internal']) > 0: - history['visible'][-1] = [ - history['visible'][-1][0], - history['visible'][-1][1].replace('controls autoplay>', 'controls>') - ] - - return history - - -def output_modifier(string): - global params, wav_idx - - if not params['activate']: - return string - - original_string = string - string = remove_surrounded_chars(string) - string = string.replace('"', '') - string = string.replace('“', '') - string = string.replace('\n', ' ') - string = string.strip() - if string == '': - string = 'empty reply, try regenerating' - - output_file = Path(f'extensions/elevenlabs_tts/outputs/{wav_idx:06d}.mp3'.format(wav_idx)) - print(f'Outputting audio to {str(output_file)}') - try: - audio = elevenlabs.generate(text=html.unescape(string), voice=params['selected_voice'], model=params['model']) - elevenlabs.save(audio, str(output_file)) - - autoplay = 'autoplay' if params['autoplay'] else '' - string = f'' - wav_idx += 1 - except elevenlabs.api.error.UnauthenticatedRateLimitError: - string = "🤖 ElevenLabs Unauthenticated Rate Limit Reached - Please create an API key to continue\n\n" - except elevenlabs.api.error.RateLimitError: - string = "🤖 ElevenLabs API Tier Limit Reached\n\n" - except elevenlabs.api.error.APIError as err: - string = f"🤖 ElevenLabs Error: {err}\n\n" - - if params['show_text']: - string += 
f'\n\n{original_string}' - - shared.processing_message = "*Is typing...*" - return string - - -def ui(): - global voices - if not voices: - voices = refresh_voices() - selected = params['selected_voice'] - if selected == 'None': - params['selected_voice'] = voices[0] - elif selected not in voices: - logger.error(f'Selected voice {selected} not available, switching to {voices[0]}') - params['selected_voice'] = voices[0] - - # Gradio elements - with gr.Row(): - activate = gr.Checkbox(value=params['activate'], label='Activate TTS') - autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically') - show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player') - - with gr.Row(): - voice = gr.Dropdown(value=params['selected_voice'], choices=voices, label='TTS Voice') - refresh = gr.Button(value='Refresh') - - with gr.Row(): - if params['api_key']: - api_key = gr.Textbox(value=params['api_key'], label='API Key') - update_api_key(params['api_key']) - else: - api_key = gr.Textbox(placeholder="Enter your API key.", label='API Key') - - with gr.Row(): - model = gr.Dropdown(value=params['model'], choices=LANG_MODELS, label='Language model') - - with gr.Row(): - convert = gr.Button('Permanently replace audios with the message texts') - convert_cancel = gr.Button('Cancel', visible=False) - convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False) - - # Convert history with confirmation - convert_arr = [convert_confirm, convert, convert_cancel] - convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr) - convert_confirm.click( - lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then( - remove_tts_from_history, gradio('history'), gradio('history')).then( - chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then( - chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display')) - - convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr) - - # Toggle message text in history - show_text.change( - lambda x: params.update({"show_text": x}), show_text, None).then( - toggle_text_in_history, gradio('history'), gradio('history')).then( - chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then( - chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display')) - - # Event functions to update the parameters in the backend - activate.change(lambda x: params.update({'activate': x}), activate, None) - voice.change(lambda x: params.update({'selected_voice': x}), voice, None) - api_key.change(update_api_key, api_key, None) - model.change(lambda x: params.update({'model': x}), model, None) - # connect.click(check_valid_api, [], connection_status) - refresh.click(refresh_voices_dd, [], voice) - # Event functions to update the parameters in the backend - autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None) diff --git a/spaces/liaokun/web/README.md b/spaces/liaokun/web/README.md deleted file mode 100644 index 4b6446bddc2cde1295da612224ec83705a151c4e..0000000000000000000000000000000000000000 --- a/spaces/liaokun/web/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Web -emoji: ⚡ -colorFrom: pink -colorTo: purple -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at 
https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/limingcv/AlignDet/pretrain/selfsup_retinanet_1x_coco_pixpro/retinanet.py b/spaces/limingcv/AlignDet/pretrain/selfsup_retinanet_1x_coco_pixpro/retinanet.py deleted file mode 100644 index 628550d3c33e88cb26d25b15f60dc50853cce538..0000000000000000000000000000000000000000 --- a/spaces/limingcv/AlignDet/pretrain/selfsup_retinanet_1x_coco_pixpro/retinanet.py +++ /dev/null @@ -1,370 +0,0 @@ -model = dict( - type='SelfSupDetector', - backbone=dict( - type='SelfSupRetinaNet', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=4, - norm_cfg=dict(type='SyncBN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict( - type='Pretrained', - checkpoint= - 'pretrain/dense-level/pixpro/pixpro_resnet50_8xb128-coslr-400e_in1k.pth' - )), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_input', - num_outs=5), - bbox_head=dict( - type='SelfSupRetinaHead', - num_classes=256, - in_channels=256, - stacked_convs=4, - feat_channels=256, - init_cfg=dict( - type='Normal', layer='Conv2d', std=0.01, override=None), - loss_cls=dict( - type='ContrastiveLoss', loss_weight=1.0, temperature=0.5), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1, - gpu_assign_thr=-1), - sampler=dict( - type='RandomSampler', - num=2048, - pos_fraction=1.0, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=1, - debug=False))) -train_dataset_type = 'MultiViewCocoDataset' -test_dataset_type = 'CocoDataset' -data_root = 'data/coco/' -classes = ['selective_search'] -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -load_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=False), - dict(type='SelectTopKProposals', topk=80) -] -train_pipeline1 = [ - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(0.01, 0.01)), - dict(type='Pad', size_divisor=32), - dict(type='RandFlip', flip_ratio=0.5), - dict( - type='OneOf', - transforms=[ - dict(type='Identity'), - dict(type='AutoContrast'), - dict(type='RandEqualize'), - dict(type='RandSolarize'), - dict(type='RandColor'), - dict(type='RandContrast'), - dict(type='RandBrightness'), - dict(type='RandSharpness'), - dict(type='RandPosterize') - ]), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -train_pipeline2 = [ - dict( - type='Resize', - img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - multiscale_mode='value', - keep_ratio=True), - dict(type='FilterAnnotations', min_gt_bbox_wh=(0.01, 0.01)), - dict(type='Pad', size_divisor=32), - dict(type='RandFlip', flip_ratio=0.5), - dict( - type='OneOf', - transforms=[ - dict(type='Identity'), - dict(type='AutoContrast'), - dict(type='RandEqualize'), - dict(type='RandSolarize'), - dict(type='RandColor'), - dict(type='RandContrast'), - dict(type='RandBrightness'), - dict(type='RandSharpness'), - dict(type='RandPosterize') - 
]), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='MultiViewCocoDataset', - dataset=dict( - type='CocoDataset', - classes=['selective_search'], - ann_file= - 'data/coco/filtered_proposals/train2017_ratio3size0008@0.5.json', - img_prefix='data/coco/train2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=False), - dict(type='SelectTopKProposals', topk=80) - ]), - num_views=2, - pipelines=[[{ - 'type': - 'Resize', - 'img_scale': [(1333, 640), (1333, 672), (1333, 704), (1333, 736), - (1333, 768), (1333, 800)], - 'multiscale_mode': - 'value', - 'keep_ratio': - True - }, { - 'type': 'FilterAnnotations', - 'min_gt_bbox_wh': (0.01, 0.01) - }, { - 'type': 'Pad', - 'size_divisor': 32 - }, { - 'type': 'RandFlip', - 'flip_ratio': 0.5 - }, { - 'type': - 'OneOf', - 'transforms': [{ - 'type': 'Identity' - }, { - 'type': 'AutoContrast' - }, { - 'type': 'RandEqualize' - }, { - 'type': 'RandSolarize' - }, { - 'type': 'RandColor' - }, { - 'type': 'RandContrast' - }, { - 'type': 'RandBrightness' - }, { - 'type': 'RandSharpness' - }, { - 'type': 'RandPosterize' - }] - }, { - 'type': 'Normalize', - 'mean': [123.675, 116.28, 103.53], - 'std': [58.395, 57.12, 57.375], - 'to_rgb': True - }, { - 'type': 'DefaultFormatBundle' - }, { - 'type': 'Collect', - 'keys': ['img', 'gt_bboxes', 'gt_labels'] - }], - [{ - 'type': - 'Resize', - 'img_scale': [(1333, 640), (1333, 672), (1333, 704), - (1333, 736), (1333, 768), (1333, 800)], - 'multiscale_mode': - 'value', - 'keep_ratio': - True - }, { - 'type': 'FilterAnnotations', - 'min_gt_bbox_wh': (0.01, 0.01) - }, { - 'type': 'Pad', - 'size_divisor': 32 - }, { - 'type': 'RandFlip', - 'flip_ratio': 0.5 - }, { - 'type': - 'OneOf', - 'transforms': [{ - 'type': 'Identity' - }, { - 'type': 'AutoContrast' - }, { - 'type': 'RandEqualize' - }, { - 'type': 'RandSolarize' - }, { - 'type': 'RandColor' - }, { - 'type': 'RandContrast' - }, { - 'type': 'RandBrightness' - }, { - 'type': 'RandSharpness' - }, { - 'type': 'RandPosterize' - }] - }, { - 'type': 'Normalize', - 'mean': [123.675, 116.28, 103.53], - 'std': [58.395, 57.12, 57.375], - 'to_rgb': True - }, { - 'type': 'DefaultFormatBundle' - }, { - 'type': 'Collect', - 'keys': ['img', 'gt_bboxes', 'gt_labels'] - }]]), - val=dict( - type='CocoDataset', - classes=['selective_search'], - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - 
]), - test=dict( - type='CocoDataset', - classes=['selective_search'], - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ])) -evaluation = dict(interval=65535, gpu_collect=True) -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) -checkpoint_config = dict(interval=1) -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) -custom_hooks = [ - dict(type='MomentumUpdateHook'), - dict( - type='MMDetWandbHook', - init_kwargs=dict(project='I2B', group='pretrain'), - interval=50, - num_eval_images=0, - log_checkpoint=False) -] -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -opencv_num_threads = 0 -mp_start_method = 'fork' -auto_scale_lr = dict(enable=False, base_batch_size=16) -custom_imports = dict( - imports=[ - 'mmselfsup.datasets.pipelines', - 'selfsup.core.hook.momentum_update_hook', - 'selfsup.datasets.pipelines.selfsup_pipelines', - 'selfsup.datasets.pipelines.rand_aug', - 'selfsup.datasets.single_view_coco', - 'selfsup.datasets.multi_view_coco', - 'selfsup.models.losses.contrastive_loss', - 'selfsup.models.dense_heads.fcos_head', - 'selfsup.models.dense_heads.retina_head', - 'selfsup.models.dense_heads.detr_head', - 'selfsup.models.dense_heads.deformable_detr_head', - 'selfsup.models.roi_heads.bbox_heads.convfc_bbox_head', - 'selfsup.models.roi_heads.standard_roi_head', - 'selfsup.models.detectors.selfsup_detector', - 'selfsup.models.detectors.selfsup_fcos', - 'selfsup.models.detectors.selfsup_detr', - 'selfsup.models.detectors.selfsup_deformable_detr', - 'selfsup.models.detectors.selfsup_retinanet', - 'selfsup.models.detectors.selfsup_mask_rcnn', - 'selfsup.core.bbox.assigners.hungarian_assigner', - 'selfsup.core.bbox.assigners.pseudo_hungarian_assigner', - 'selfsup.core.bbox.match_costs.match_cost' - ], - allow_failed_imports=False) -work_dir = 'work_dirs/selfsup_retinanet_1x_coco_pixpro' -auto_resume = False -gpu_ids = range(0, 8) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Ansys Fluent 14.5 Torrent ((NEW)) Download With 132.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Ansys Fluent 14.5 Torrent ((NEW)) Download With 132.md deleted file mode 100644 index 84da26adede37a0f15ca9fa17321a34c377956dc..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Ansys Fluent 14.5 Torrent ((NEW)) Download With 132.md +++ /dev/null @@ -1,6 +0,0 @@ -

              ansys fluent 14.5 torrent download with 132


              DOWNLOAD 🔗 https://bytlly.com/2uGwoO



              -
              -
              -
              -

              diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Hard Disk Sentinel Pro 5.30.6 Build 9417 Crack Key.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Hard Disk Sentinel Pro 5.30.6 Build 9417 Crack Key.md deleted file mode 100644 index f9f99ea5c67cb86fe16ada7d35cd42c1af1b4055..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Hard Disk Sentinel Pro 5.30.6 Build 9417 Crack Key.md +++ /dev/null @@ -1,6 +0,0 @@ -

              Hard Disk Sentinel Pro 5.30.6 Build 9417 Crack Key


              DOWNLOAD ✒ ✒ ✒ https://bytlly.com/2uGvTG



              -
-This was a good job, but no room for advancement and the drive was too far, especially when you were ... Hard Disk Sentinel Pro 5.30.6 Build 9417 Crack & Key.
              -
              -
              -

              diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Hitman Absolution V 1.0 433.1 Trainer By Fling ((HOT)).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Hitman Absolution V 1.0 433.1 Trainer By Fling ((HOT)).md deleted file mode 100644 index ddafe3a7f65eb3d7e77f36477904e6fff66bc300..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Hitman Absolution V 1.0 433.1 Trainer By Fling ((HOT)).md +++ /dev/null @@ -1,108 +0,0 @@ -## Hitman Absolution V 1.0 433.1 Trainer By Fling - - - - - - - - - -**DOWNLOAD ->->->-> [https://fienislile.blogspot.com/?download=2txySU](https://fienislile.blogspot.com/?download=2txySU)** - - - - - - - - - - - - - -# How to Use Hitman Absolution V 1.0 433.1 Trainer By Fling - - - -Hitman Absolution is a stealth action game that lets you play as Agent 47, a professional assassin who can use various weapons, disguises and gadgets to complete his missions. However, if you want to make the game easier or more fun, you can use a trainer that modifies some aspects of the game, such as health, ammo, detection and more. - - - -One of the trainers available for Hitman Absolution is made by Fling, a well-known trainer maker who has created trainers for many other games. This trainer works with version 1.0 433.1 of the game and has 10 options that you can activate by pressing certain keys on your keyboard. Here are the options and their effects: - - - -- Numpad 1: Infinite Health - You won't die from any damage. - -- Numpad 2: Infinite Ammo - You won't run out of bullets or grenades. - -- Numpad 3: No Reload - You don't need to reload your weapons. - -- Numpad 4: Infinite Instinct - You can use your instinct mode as much as you want. - -- Numpad 5: No Detection - You won't be spotted by enemies or cameras. - -- Numpad 6: Silent Kill - You can kill enemies silently without alerting others. - -- Numpad 7: Super Accuracy - Your shots will always hit the target. - -- Numpad 8: Rapid Fire - Your weapons will fire faster. - -- Numpad 9: No Recoil - Your weapons won't have any recoil. - -- Numpad 0: One Hit Kill - You can kill enemies with one shot or melee attack. - - - -To use this trainer, you need to follow these steps: - - - -1. Download the trainer from one of the web search results[^2^] [^3^]. - -2. Unzip the trainer file using a software like 7-Zip or WinRAR. - -3. Run the trainer as administrator before launching the game. - -4. Press F1 at the main menu of the game to activate the trainer. - -5. Start a new game or load a saved game. - -6. Press the numpad keys to toggle the options on or off during gameplay. - - - -Note: The trainer may not work with other versions of the game or after updating it. It may also cause some glitches or crashes, so use it at your own risk. It is recommended to disable your antivirus software before using the trainer, as some antivirus programs may detect it as a virus or malware. The trainer is only for single-player mode and should not be used online or in multiplayer mode, as it may result in a ban or other consequences. - - - -## Tips and Tricks for Playing Hitman Absolution - - - -Hitman Absolution is a game that requires you to be stealthy, creative and adaptable. You can approach each mission in different ways, using various weapons, disguises and tactics. However, some methods are more effective than others, and some challenges are harder than they seem. Here are some tips and tricks that can help you become a better assassin and master the game. - - - -- Use your instinct mode wisely. 
Instinct mode allows you to see enemies through walls, predict their movements, blend in with crowds and activate point shooting. However, it is a limited resource that depletes quickly and regenerates slowly. You should use it sparingly and only when necessary, such as when you need to avoid detection or take out multiple enemies quickly. - -- Explore your surroundings. Each level in Hitman Absolution is full of hidden paths, items, weapons and opportunities. You should always look around for alternative routes, useful objects and environmental hazards that you can use to your advantage. For example, you can find disguises that allow you to access restricted areas, weapons that suit your play style, or objects that can distract or kill enemies. - -- Be careful with your actions. Everything you do in Hitman Absolution has consequences. If you kill someone, you need to hide the body or risk raising an alarm. If you use a loud weapon, you will attract attention and lose your score. If you wear a disguise, you will be suspicious to people who know the real person. You should always think before you act and weigh the risks and rewards of your choices. - -- Complete challenges. Challenges are optional objectives that reward you with points and unlock new abilities, weapons and disguises. They also add replay value to the game and encourage you to try different approaches and strategies. You can view the challenges for each level in the pause menu or the main menu. Some challenges are easy to complete, while others require more skill and creativity. - -- Watch online videos. If you are stuck on a particular level or challenge, or if you want to see how other players have completed the game, you can watch online videos on YouTube or other platforms. There are many videos that show different ways to complete each mission, achieve silent assassin rating, or find Easter eggs and secrets. You can learn a lot from watching other players' techniques and tips. - - - -Hitman Absolution is a game that offers a lot of freedom and fun for players who enjoy stealth action games. By following these tips and tricks, you can improve your skills and enjoy the game even more. - - dfd1c89656 - - - - - diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Logo Tiger Plus Full Crack.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Logo Tiger Plus Full Crack.md deleted file mode 100644 index 78796139abcd57c19f1dc10f36dcbaa2eb8c8320..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Logo Tiger Plus Full Crack.md +++ /dev/null @@ -1,116 +0,0 @@ - -

              Logo Tiger Plus Full Crack: The Ultimate Logo Design Software

              - -

              If you are looking for a logo design software that is easy to use, powerful, and affordable, then you should check out Logo Tiger Plus Full Crack. Logo Tiger Plus is a software that allows you to create professional logos in minutes, without any design skills or experience. You can choose from thousands of templates, icons, fonts, and colors, or create your own logo from scratch. You can also edit and customize your logo as much as you want, until you are satisfied with the result. Logo Tiger Plus Full Crack also lets you export your logo in various formats, such as PNG, JPG, SVG, PDF, and more. You can use your logo for any purpose, such as websites, social media, business cards, flyers, banners, and more.

              -

              logo tiger plus full crack


              Download Zip ✺✺✺ https://bytlly.com/2uGvYJ



              - -

              Why You Should Use Logo Tiger Plus Full Crack

              - -

              Logo Tiger Plus Full Crack is not just another logo maker software. It is a software that offers many benefits and features that make it stand out from the rest. Here are some of the reasons why you should use Logo Tiger Plus Full Crack:

              - -
                -
• It is easy to use. You don't need any design skills or experience to use Logo Tiger Plus Full Crack. You can create a logo in minutes by following a few simple steps. You can also watch tutorials and guides on how to use the software.
• It is powerful. Logo Tiger Plus Full Crack has a lot of tools and options that allow you to create any logo you want. You can adjust the size, shape, color, gradient, shadow, outline, and more of your logo elements. You can also add effects, such as glow, emboss, bevel, and more. You can also use layers to organize your logo elements and apply changes to multiple elements at once.
• It is affordable. Logo Tiger Plus Full Crack is a one-time purchase software that does not require any subscription or monthly fees. You can use it for as long as you want, without any limitations or restrictions. You can also get free updates and support from the developers.
• It is compatible. Logo Tiger Plus Full Crack works on Windows operating systems, such as Windows 7, 8, 10, and more. It also supports multiple languages, such as English, Turkish, German, French, Spanish, and more. You can also import and export your logo in various formats, such as PNG, JPG, SVG, PDF, and more.

              How to Download and Install Logo Tiger Plus Full Crack

              - -

              If you want to download and install Logo Tiger Plus Full Crack on your computer, you can follow these steps:

              - -
                -
1. Go to the official website of Logo Tiger Plus Full Crack and click on the download button.
2. Choose the version that suits your operating system and click on the download link.
3. Save the file on your computer and run it as an administrator.
4. Follow the instructions on the screen to complete the installation process.
5. Launch the software and enjoy creating logos with Logo Tiger Plus Full Crack.

              -

              - -

              Logo Tiger Plus Full Crack Reviews

              - -

              Logo Tiger Plus Full Crack has received many positive reviews from users who have tried it. Here are some of the testimonials from satisfied customers:

              - -
              -

              "I love Logo Tiger Plus Full Crack. It is so easy to use and it has everything I need to create a logo for my business. I was able to create a logo in minutes and it looks amazing. I highly recommend this software to anyone who needs a logo." - John Smith, entrepreneur

              -
              - -
              -

              "Logo Tiger Plus Full Crack is a great software for logo design. It has a lot of templates, icons, fonts, and colors to choose from. I can also customize my logo as much as I want and export it in different formats. It is very affordable and worth every penny." - Lisa Jones, blogger

              -

              -
              - -
              -

              "Logo Tiger Plus Full Crack is a powerful tool for logo design and branding. It has a lot of features and options that allow me to create any logo I want. I can also add effects, such as glow, emboss, bevel, and more. It is compatible with Windows and supports multiple languages. It is the best logo maker software I have ever used." - Mark Lee, designer

              -
              - -

              FAQs about Logo Tiger Plus Full Crack

              - -

              If you have any questions about Logo Tiger Plus Full Crack, you can check out these frequently asked questions:

              - -

              What is Logo Tiger Plus Full Crack?

              - -

Logo Tiger Plus Full Crack is a logo design software that allows you to create professional logos in minutes, without any design skills or experience. You can choose from thousands of templates, icons, fonts, and colors, or create your own logo from scratch. You can also edit and customize your logo as much as you want, until you are satisfied with the result. Logo Tiger Plus Full Crack also lets you export your logo in various formats, such as PNG, JPG, SVG, PDF, and more.

              How to use Logo Tiger Plus Full Crack?

              - -

              To use Logo Tiger Plus Full Crack, you need to follow these steps:

              - -
                -
1. Launch the software and choose a template or start from scratch.
2. Add icons, text, shapes, and other elements to your logo.
3. Adjust the size, color, gradient, shadow, outline, and more of your logo elements.
4. Add effects, such as glow, emboss, bevel, and more to your logo.
5. Save and export your logo in various formats, such as PNG, JPG, SVG, PDF, and more.

              Is Logo Tiger Plus Full Crack safe and legal?

              - -

              Logo Tiger Plus Full Crack is a safe and legal software that does not contain any viruses, malware, or spyware. It is also a licensed software that does not violate any copyrights or trademarks. However, you should always download and install Logo Tiger Plus Full Crack from the official website or a trusted source, to avoid any risks or problems.

              - -

              What are the alternatives to Logo Tiger Plus Full Crack?

              - -

              If you are looking for other logo design software that are similar to Logo Tiger Plus Full Crack, you can check out these alternatives:

              - -
                -
• Logo Maker: A free online logo maker that allows you to create logos in minutes. You can choose from hundreds of templates, icons, fonts, and colors, or upload your own images. You can also edit and customize your logo as you like, and download it in high-resolution formats.
• Canva: A popular online graphic design platform that offers a logo maker feature. You can create logos for any industry or niche, using thousands of templates, icons, fonts, and colors. You can also add your own images, text, and shapes, and adjust the size, color, transparency, and more of your logo elements. You can download your logo in various formats, such as PNG, JPG, and PDF.
• DesignEvo: A professional online logo maker that allows you to create logos in three easy steps. You can choose from over 10,000 templates, millions of icons, hundreds of fonts, and various colors. You can also customize your logo with various effects, such as shadow, glow, and outline. You can download your logo in high-quality formats, such as PNG and JPG.

              Conclusion

              - -

Logo Tiger Plus Full Crack is a logo design software that is easy to use, powerful, and affordable. It allows you to create professional logos in minutes, without any design skills or experience. You can choose from thousands of templates, icons, fonts, and colors, or create your own logo from scratch. You can also edit and customize your logo as much as you want, until you are satisfied with the result. Logo Tiger Plus Full Crack also lets you export your logo in various formats, such as PNG, JPG, SVG, PDF, and more. You can use your logo for any purpose, such as websites, social media, business cards, flyers, banners, and more.

              - -

              If you are looking for a logo design software that is easy to use, powerful, and affordable, then you should check out Logo Tiger Plus Full Crack. You can download and install it from the official website or a trusted source, and enjoy creating logos with Logo Tiger Plus Full Crack.

              -

                \ No newline at end of file diff --git a/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/ssplit_tokenzier.py b/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/ssplit_tokenzier.py deleted file mode 100644 index 71a090898f9e7d17cd64754416a83b443c7e8fc1..0000000000000000000000000000000000000000 --- a/spaces/lingbionlp/PhenoTagger_v1.2_Demo/src/ssplit_tokenzier.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Fri Jun 12 15:26:44 2020 - -@author: luol2 -""" - -import nltk -from nltk.stem import WordNetLemmatizer -from nltk.corpus import wordnet -from nltk.stem.porter import PorterStemmer -lemmatizer = WordNetLemmatizer() -stemmer = PorterStemmer() -import io - -def get_wordnet_pos(treebank_tag): - if treebank_tag.startswith('J'): - return wordnet.ADJ - elif treebank_tag.startswith('V'): - return wordnet.VERB - elif treebank_tag.startswith('N'): - return wordnet.NOUN - elif treebank_tag.startswith('R') or treebank_tag=='IN': - return wordnet.ADV - else: - return wordnet.NOUN - -def ssplit_token_pos_lemma(in_text): - - fout=io.StringIO() - - line=in_text.strip() - line=line.replace('-',' - ').replace('/',' / ') - sentences = nltk.sent_tokenize(line) - sentences = [nltk.word_tokenize(sent) for sent in sentences] -# print(sentences) - for sent in sentences: - token_pos = nltk.pos_tag(sent) - for token in token_pos: - lemma = lemmatizer.lemmatize(token[0].lower(), get_wordnet_pos(token[1])) - stem = stemmer.stem(token[0].lower()) - fout.write(token[0]+'\t'+lemma+'\t'+stem+'\t'+token[1]+'\n') - fout.write('\n') - - return fout.getvalue() \ No newline at end of file diff --git a/spaces/liuxiaopai/chatgpt-demo/README.md b/spaces/liuxiaopai/chatgpt-demo/README.md deleted file mode 100644 index 8d8ff2fe1f3bb586df2bad627286edfee571365a..0000000000000000000000000000000000000000 --- a/spaces/liuxiaopai/chatgpt-demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chatgpt Demo -emoji: 🐠 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -duplicated_from: anzorq/chatgpt-demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lqinyli/ali/Dockerfile b/spaces/lqinyli/ali/Dockerfile deleted file mode 100644 index 3f4dc07e03f684153c9e32dd4ad296fcc6ee4bca..0000000000000000000000000000000000000000 --- a/spaces/lqinyli/ali/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/devel/cudnn8/Dockerfile -FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -ENV DEBIAN_FRONTEND noninteractive - -WORKDIR /content - -RUN apt-get update -y && apt-get upgrade -y && apt-get install -y sudo && apt-get install -y python3-pip && pip3 install --upgrade pip -RUN apt-get install -y curl tzdata aria2 gnupg wget htop sudo git git-lfs software-properties-common build-essential libgl1 zip unzip - -# Config timezone -RUN date -R && sudo ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && date -R - -ENV PATH="/home/admin/.local/bin:${PATH}" -ENV ALIST_TAR="alist-linux-amd64.tar.gz" -# # Alist -# RUN wget https://github.com/alist-org/alist/releases/download/v3.12.2/alist-linux-amd64.tar.gz -RUN curl -s https://api.github.com/repos/alist-org/alist/releases/latest | grep $ALIST_TAR | grep "browser_download_url" | awk '{print$2}' | xargs -I {} wget {} -RUN ls $ALIST_TAR || wget https://github.com/alist-org/alist/releases/download/v3.19.0/alist-linux-amd64.tar.gz -RUN tar -zxvf $ALIST_TAR ; rm *.gz && 
diff --git a/spaces/liuxiaopai/chatgpt-demo/README.md b/spaces/liuxiaopai/chatgpt-demo/README.md deleted file mode 100644 index 8d8ff2fe1f3bb586df2bad627286edfee571365a..0000000000000000000000000000000000000000 --- a/spaces/liuxiaopai/chatgpt-demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chatgpt Demo -emoji: 🐠 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -duplicated_from: anzorq/chatgpt-demo --- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/lqinyli/ali/Dockerfile b/spaces/lqinyli/ali/Dockerfile deleted file mode 100644 index 3f4dc07e03f684153c9e32dd4ad296fcc6ee4bca..0000000000000000000000000000000000000000 --- a/spaces/lqinyli/ali/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/devel/cudnn8/Dockerfile -FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -ENV DEBIAN_FRONTEND noninteractive - -WORKDIR /content - -RUN apt-get update -y && apt-get upgrade -y && apt-get install -y sudo && apt-get install -y python3-pip && pip3 install --upgrade pip -RUN apt-get install -y curl tzdata aria2 gnupg wget htop sudo git git-lfs software-properties-common build-essential libgl1 zip unzip - -# Config timezone -RUN date -R && sudo ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && date -R - -ENV PATH="/home/admin/.local/bin:${PATH}" -ENV ALIST_TAR="alist-linux-amd64.tar.gz" -# # Alist -# RUN wget https://github.com/alist-org/alist/releases/download/v3.12.2/alist-linux-amd64.tar.gz -RUN curl -s https://api.github.com/repos/alist-org/alist/releases/latest | grep $ALIST_TAR | grep "browser_download_url" | awk '{print$2}' | xargs -I {} wget {} -RUN ls $ALIST_TAR || wget https://github.com/alist-org/alist/releases/download/v3.19.0/alist-linux-amd64.tar.gz -RUN tar -zxvf $ALIST_TAR ; rm *.gz && chmod 777 alist && ls -l - -COPY *.sh . -RUN chmod a+x script.sh - -RUN adduser --disabled-password --gecos '' admin -RUN adduser admin sudo -RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers - -RUN chown -R admin:admin /content -RUN chmod -R 777 /content -RUN chown -R admin:admin /home -RUN chmod -R 777 /home -USER admin - -EXPOSE 5244 - -CMD ["./script.sh"] \ No newline at end of file diff --git a/spaces/ludusc/latent-space-theories/frontend/__init__.py b/spaces/ludusc/latent-space-theories/frontend/__init__.py deleted file mode 100644 index e2c2295cf315ff6b3eba65bd8c0cf4dd05d931f6..0000000000000000000000000000000000000000 --- a/spaces/ludusc/latent-space-theories/frontend/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -import streamlit.components.v1 as components - -on_click_graph = components.declare_component( - "on_click_graph", - path="./frontend" -) \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/thrust/thrust/distance.h b/spaces/ma-xu/LIVE/thrust/thrust/distance.h deleted file mode 100644 index 6dd4800be7a8975061fb58777d603f13fb0c82b6..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/distance.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file distance.h - * \brief Computes the size of a range - */ - -#pragma once - -#include <thrust/detail/config.h> -#include <thrust/iterator/iterator_traits.h> - -namespace thrust -{ - - -/*! \addtogroup iterators - * \{ - */ - -/*! \p distance finds the distance between \p first and \p last, i.e. the - * number of times that \p first must be incremented until it is equal to - * \p last. - * - * \param first The beginning of an input range of interest. - * \param last The end of an input range of interest. - * \return The distance between the beginning and end of the input range. - * - * \tparam InputIterator is a model of Input Iterator. - * - * \pre If \c InputIterator meets the requirements of random access iterator, \p last shall be reachable from \p first or - * \p first shall be reachable from \p last; otherwise, \p last shall be reachable from \p first. - * - * The following code snippet demonstrates how to use \p distance to compute - * the distance to one iterator from another. - * - * \code - * #include <thrust/distance.h> - * #include <thrust/device_vector.h> - * ... - * thrust::device_vector<int> vec(13); - * thrust::device_vector<int>::iterator iter1 = vec.begin(); - * thrust::device_vector<int>::iterator iter2 = iter1 + 7; - * - * int d = thrust::distance(iter1, iter2); - * - * // d is 7 - * \endcode - * - * \see http://www.sgi.com/tech/stl/distance.html - */ -template<typename InputIterator> -inline __host__ __device__ - typename thrust::iterator_traits<InputIterator>::difference_type - distance(InputIterator first, InputIterator last); - -/*! \} // end iterators - */ - -} // end thrust - -#include <thrust/detail/distance.inl> -
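The \pre clause in the header above is the practical point: when `InputIterator` is a random access iterator, `thrust::distance` reduces to a constant-time subtraction, otherwise it must increment `first` step by step until it reaches `last`. A minimal host-side sketch of the documented behaviour (standard Thrust headers only; assumes a CUDA toolkit or standalone Thrust installation is available):

```cpp
#include <thrust/distance.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>

int main(void) {
    // counting_iterator is a random access iterator, so this distance
    // is computed in O(1) as the subtraction last - first.
    thrust::counting_iterator<int> first(10), last(25);
    printf("%ld\n", (long) thrust::distance(first, last)); // prints 15
    return 0;
}
```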
diff --git a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/__init__.py b/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/__init__.py deleted file mode 100644 index 10a141788da65ea6527a4eecc9628603824d732b..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/BOPBTL/Face_Enhancement/data/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import importlib -import torch.utils.data -from data.base_dataset import BaseDataset -from data.face_dataset import FaceTestDataset - - -def create_dataloader(opt): - - instance = FaceTestDataset() - instance.initialize(opt) - print("dataset [%s] of size %d was created" % (type(instance).__name__, len(instance))) - dataloader = torch.utils.data.DataLoader( - instance, - batch_size=opt.batchSize, - shuffle=not opt.serial_batches, - num_workers=int(opt.nThreads), - drop_last=opt.isTrain, - ) - return dataloader diff --git a/spaces/mascIT/AgeGuesser/yolov5/utils/general.c b/spaces/mascIT/AgeGuesser/yolov5/utils/general.c deleted file mode 100644 index d7753bd0db92b6b31d9497dad1f3fdddccd1a24f..0000000000000000000000000000000000000000 --- a/spaces/mascIT/AgeGuesser/yolov5/utils/general.c +++ /dev/null @@ -1,29360 +0,0 @@ -/* Generated by Cython 3.0.0a10 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "name": "pdf_toolbox.lib.dia_yolov5.utils.general", - "sources": [ - "pdf_toolbox\\lib\\dia_yolov5\\utils\\general.py" - ] - }, - "module_name": "pdf_toolbox.lib.dia_yolov5.utils.general" -} -END: Cython Metadata */ - -#ifndef PY_SSIZE_T_CLEAN -#define PY_SSIZE_T_CLEAN -#endif /* PY_SSIZE_T_CLEAN */ -#if defined(CYTHON_LIMITED_API) && 0 - #ifndef Py_LIMITED_API - #if CYTHON_LIMITED_API+0 > 0x03030000 - #define Py_LIMITED_API CYTHON_LIMITED_API - #else - #define Py_LIMITED_API 0x03030000 - #endif - #endif -#endif - -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02070000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.7+ or Python 3.3+. -#else -#define CYTHON_ABI "3_0_0a10" -#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI -#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "." -#define CYTHON_HEX_VERSION 0x030000AA -#define CYTHON_FUTURE_DIVISION 1 -#include <stddef.h> -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #define HAVE_LONG_LONG -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#if defined(GRAALVM_PYTHON) - /* For very preliminary testing purposes. Most variables are set the same as PyPy. 
- The existence of this section does not imply that anything works or is even tested */ - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 1 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(PYPY_VERSION) - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef 
CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(CYTHON_LIMITED_API) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 1 - #define CYTHON_COMPILING_IN_GRAAL 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 1 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #endif - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS 1 - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 1 - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #ifndef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #endif - #ifndef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #ifndef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000) - #endif - #ifndef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1) - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef 
CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS 1 - #endif - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #endif - #ifndef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #endif - #if PY_VERSION_HEX < 0x030400a1 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #elif !defined(CYTHON_USE_TP_FINALIZE) - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #if PY_VERSION_HEX < 0x030600B1 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #elif !defined(CYTHON_USE_DICT_VERSIONS) - #define CYTHON_USE_DICT_VERSIONS 1 - #endif - #if PY_VERSION_HEX < 0x030700A3 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #elif !defined(CYTHON_USE_EXC_INFO_STACK) - #define CYTHON_USE_EXC_INFO_STACK 1 - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if !defined(CYTHON_VECTORCALL) -#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1) -#endif -#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1) -#if CYTHON_USE_PYLONG_INTERNALS - #if PY_MAJOR_VERSION < 3 - #include "longintrepr.h" - #endif - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_UNUSED_VAR -# if defined(__cplusplus) - template<class T> void CYTHON_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR - #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x) -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - #endif - #endif - #if _MSC_VER < 1300 - #ifdef _WIN64 - typedef unsigned long long __pyx_uintptr_t; - #else - typedef unsigned int __pyx_uintptr_t; - #endif - #else - #ifdef _WIN64 - typedef unsigned __int64 __pyx_uintptr_t; - #else - typedef unsigned __int32 __pyx_uintptr_t; - #endif - #endif -#else - #include <stdint.h> - typedef 
uintptr_t __pyx_uintptr_t; -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) && __cplusplus >= 201103L - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if defined(__clang__ ) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 -#endif -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_DefaultClassType PyClass_Type - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" - #define __Pyx_DefaultClassType PyType_Type -#if PY_VERSION_HEX >= 0x030B00A1 - static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, - PyObject *code, PyObject *c, PyObject* n, PyObject *v, - PyObject *fv, PyObject *cell, PyObject* fn, - PyObject *name, int fline, PyObject *lnos) { - PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL; - PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL; - const char *fn_cstr=NULL; - const char *name_cstr=NULL; - PyCodeObject* co=NULL; - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (!(kwds=PyDict_New())) goto end; - if (!(argcount=PyLong_FromLong(a))) goto end; - if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end; - if (!(posonlyargcount=PyLong_FromLong(p))) goto end; - if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end; - if (!(kwonlyargcount=PyLong_FromLong(k))) goto end; - if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end; - if (!(nlocals=PyLong_FromLong(l))) goto end; - if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end; - if (!(stacksize=PyLong_FromLong(s))) goto end; - if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end; - if (!(flags=PyLong_FromLong(f))) goto end; - if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_cellvars", 
cell) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end; - if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end; - if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end; - if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end; - if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too; - if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here - if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too; - Py_XDECREF((PyObject*)co); - co = (PyCodeObject*)call_result; - call_result = NULL; - if (0) { - cleanup_code_too: - Py_XDECREF((PyObject*)co); - co = NULL; - } - end: - Py_XDECREF(kwds); - Py_XDECREF(argcount); - Py_XDECREF(posonlyargcount); - Py_XDECREF(kwonlyargcount); - Py_XDECREF(nlocals); - Py_XDECREF(stacksize); - Py_XDECREF(replace); - Py_XDECREF(call_result); - Py_XDECREF(empty); - if (type) { - PyErr_Restore(type, value, traceback); - } - return co; - } -#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif -#endif -#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE) - #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type) -#else - #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type)) -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_METH_FASTCALL - #define __Pyx_METH_FASTCALL METH_FASTCALL - #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast - #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords -#else - #define __Pyx_METH_FASTCALL METH_VARARGS - #define __Pyx_PyCFunction_FastCall PyCFunction - #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords -#endif -#if CYTHON_VECTORCALL - #define __pyx_vectorcallfunc vectorcallfunc - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET - #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n)) -#elif CYTHON_BACKPORT_VECTORCALL - typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args, - size_t nargsf, PyObject *kwnames); - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1)) - #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)) -#else - #define 
__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0 - #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n)) -#endif -#if PY_VERSION_HEX < 0x030900B1 - #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b)) - typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *); -#else - #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b) - #define __Pyx_PyCMethod PyCMethod -#endif -#ifndef METH_METHOD - #define METH_METHOD 0x200 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyThreadState_Current PyThreadState_Get() -#elif !CYTHON_FAST_THREAD_STATE - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op) -{ - void *result; - result = PyModule_GetState(op); - if (!result) - Py_FatalError("Couldn't find the module state"); - return result; -} -#endif -#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype) -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name)) -#else - #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name) -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if PY_MAJOR_VERSION < 3 - #if CYTHON_COMPILING_IN_PYPY - #if PYPY_VERSION_NUM < 0x07030600 - #if defined(__cplusplus) && __cplusplus >= 201402L - [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]] - #elif defined(__GNUC__) || defined(__clang__) - __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))) - #elif defined(_MSC_VER) - 
__declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")) - #endif - static CYTHON_INLINE int PyGILState_Check(void) { - return 0; - } - #else // PYPY_VERSION_NUM < 0x07030600 - #endif // PYPY_VERSION_NUM < 0x07030600 - #else - static CYTHON_INLINE int PyGILState_Check(void) { - PyThreadState * tstate = _PyThreadState_Current; - return tstate && (tstate == PyGILState_GetThisThreadState()); - } - #endif -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) { - PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name); - if (res == NULL) PyErr_Clear(); - return res; -} -#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) -#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError -#define __Pyx_PyDict_GetItemStr PyDict_GetItem -#else -static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) { -#if CYTHON_COMPILING_IN_PYPY - return PyDict_GetItem(dict, name); -#else - PyDictEntry *ep; - PyDictObject *mp = (PyDictObject*) dict; - long hash = ((PyStringObject *) name)->ob_shash; - assert(hash != -1); - ep = (mp->ma_lookup)(mp, name, hash); - if (ep == NULL) { - return NULL; - } - return ep->me_value; -#endif -} -#define __Pyx_PyDict_GetItemStr PyDict_GetItem -#endif -#if CYTHON_USE_TYPE_SLOTS - #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags) - #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0) - #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext) -#else - #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp)) - #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature) - #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next -#endif -#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000 -#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\ - PyTypeObject *type = Py_TYPE(obj);\ - assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\ - PyObject_GC_Del(obj);\ - Py_DECREF(type);\ -} -#else -#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111) - #define __Pyx_PyUnicode_KIND(u) ((void)u, (0)) - #define __Pyx_PyUnicode_DATA(u) ((void*)u) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i)) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u)) -#elif PY_VERSION_HEX > 
0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #if defined(PyUnicode_IS_READY) - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #else - #define __Pyx_PyUnicode_READY(op) (0) - #endif - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u)) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #endif - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #if !defined(PyUnicode_DecodeUnicodeEscape) - #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors) - #endif - #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500) - #undef PyUnicode_Contains - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) - #endif - #if !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) - #endif - #if !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) - #endif -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#if CYTHON_COMPILING_IN_CPYTHON - #define __Pyx_PySequence_ListKeepNew(obj)\ - (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj)) -#else - #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; 
-#endif - -#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include <math.h> -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__pdf_toolbox__lib__dia_yolov5__utils__general -#define __PYX_HAVE_API__pdf_toolbox__lib__dia_yolov5__utils__general -/* Early includes */ -#ifdef _OPENMP -#include <omp.h> -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include <cstdlib> - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - #define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const wchar_t *u) -{ - const wchar_t *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#else -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) -{ - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#endif -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || 
(__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ -static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -#if !CYTHON_USE_MODULE_STATE -static PyObject *__pyx_m = NULL; -static PyObject *__pyx_d; -static PyObject *__pyx_b; -static PyObject *__pyx_cython_runtime = NULL; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static PyObject *__pyx_empty_unicode; -#endif -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm = __FILE__; -static const char *__pyx_filename; - -/* #### Code section: filename_table ### */ - -static const char *__pyx_f[] = { - "pdf_toolbox\\\\lib\\\\dia_yolov5\\\\utils\\\\general.py", -}; -/* #### Code section: utility_code_proto_before_types ### */ -/* #### Code section: numeric_typedefs ### */ -/* #### Code section: complex_type_declarations ### */ -/* #### Code section: type_declarations ### */ - -/*--- Type declarations ---*/ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr; -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr; -struct __pyx_defaults; -typedef struct __pyx_defaults __pyx_defaults; -struct __pyx_defaults1; -typedef struct __pyx_defaults1 __pyx_defaults1; -struct __pyx_ctuple_int__and_int; -typedef struct __pyx_ctuple_int__and_int __pyx_ctuple_int__and_int; -struct __pyx_defaults { - PyObject *__pyx_arg_verbose; -}; -struct __pyx_defaults1 { - PyObject *__pyx_arg_class_weights; -}; - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":77 - * np.random.seed(seed) - * torch.manual_seed(seed) - * cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) # <<<<<<<<<<<<<< - * - * - */ -struct __pyx_ctuple_int__and_int { - int f0; - int f1; -}; - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":49 - * LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) - * - * def try_except(func): # <<<<<<<<<<<<<< - * # try-except function. 
Usage: @try_except decorator - * def handler(*args, **kwargs): - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except { - PyObject_HEAD - PyObject *__pyx_v_func; -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":65 - * - * - * def print_args(name, opt): # <<<<<<<<<<<<<< - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args { - PyObject_HEAD - PyObject *__pyx_v_opt; -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":67 - * def print_args(name, opt): - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) # <<<<<<<<<<<<<< - * - * - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr { - PyObject_HEAD - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *__pyx_outer_scope; - PyObject *__pyx_v_k; - PyObject *__pyx_v_v; -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":80 - * - * - * def intersect_dicts(da, db, exclude=()): # <<<<<<<<<<<<<< - * # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts { - PyObject_HEAD - PyObject *__pyx_v_exclude; - PyObject *__pyx_8genexpr2__pyx_v_k; -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":82 - * def intersect_dicts(da, db, exclude=()): - * # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} # <<<<<<<<<<<<<< - * - * - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr { - PyObject_HEAD - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *__pyx_outer_scope; - PyObject *__pyx_v_x; -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":135 - * - * - * def file_size(path): # <<<<<<<<<<<<<< - * # Return file/dir size (MB) - * path = Path(path) - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size { - PyObject_HEAD - PyObject *__pyx_v_path; -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":141 - * return path.stat().st_size / 1E6 - * elif path.is_dir(): - * return sum(f.stat().st_size for f in path.glob('**[inserted by cython to avoid comment closer]/[inserted by cython to avoid comment start]*') if f.is_file()) / 1E6 # <<<<<<<<<<<<<< - * else: - * return 0.0 - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr { - PyObject_HEAD - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *__pyx_outer_scope; - PyObject *__pyx_v_f; - PyObject *__pyx_t_0; - Py_ssize_t __pyx_t_1; - PyObject *(*__pyx_t_2)(PyObject *); -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":186 - * - * - * def one_cycle(y1=0.0, y2=1.0, steps=100): # <<<<<<<<<<<<<< - * # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - * return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * 
(y2 - y1) + y1 - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle { - PyObject_HEAD - PyObject *__pyx_v_steps; - PyObject *__pyx_v_y1; - PyObject *__pyx_v_y2; -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":191 - * - * - * def colorstr(*input): # <<<<<<<<<<<<<< - * # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') - * *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr { - PyObject_HEAD - PyObject *__pyx_v_args; - PyObject *__pyx_v_colors; -}; - - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":213 - * 'bold': '\033[1m', - * 'underline': '\033[4m'} - * return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] # <<<<<<<<<<<<<< - * - * - */ -struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr { - PyObject_HEAD - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *__pyx_outer_scope; - PyObject *__pyx_v_x; -}; - -/* #### Code section: utility_code_proto ### */ - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, Py_ssize_t); - void (*DECREF)(void*, PyObject*, Py_ssize_t); - void (*GOTREF)(void*, PyObject*, Py_ssize_t); - void (*GIVEREF)(void*, PyObject*, Py_ssize_t); - void* (*SetupContext)(const char*, Py_ssize_t, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ - } - #define __Pyx_RefNannyFinishContextNogil() {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __Pyx_RefNannyFinishContext();\ - PyGILState_Release(__pyx_gilstate_save);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__)) - #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext() -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0) -#else - #define 
__Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContextNogil() - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_Py_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; Py_XDECREF(tmp);\ - } while (0) -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) 
PyObject_GetAttr(o,n) -#endif - -/* PyObjectGetAttrStrNoError.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* TupleAndListFromArray.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n); -static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n); -#endif - -/* IncludeStringH.proto */ -#include <string.h> - -/* BytesEquals.proto */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); - -/* UnicodeEquals.proto */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); - -/* fastcall.proto */ -#define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i) -#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds) -#define __Pyx_KwValues_VARARGS(args, nargs) NULL -#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s) -#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw) -#if CYTHON_METH_FASTCALL - #define __Pyx_Arg_FASTCALL(args, i) args[i] - #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) - #define __Pyx_KwValues_FASTCALL(args, nargs) (&args[nargs]) - static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); - #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) -#else - #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS - #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS - #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS - #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS - #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS -#endif -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start) -#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start) -#else -#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop) -#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) -#endif - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues, - PyObject **argnames[], - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, - const char* function_name); - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ -
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* PyIntCompare.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace); - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#if !CYTHON_VECTORCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif -#if !CYTHON_VECTORCALL - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif // !CYTHON_VECTORCALL -#endif - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectFastCall.proto */ -#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL) -static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, 
PyObject **args, size_t nargs, PyObject *kwargs); - -/* KeywordStringCheck.proto */ -static int __Pyx_CheckKeywordStrings(PyObject *kw, const char* function_name, int kw_allowed); - -/* RaiseClosureNameError.proto */ -static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname); - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* SwapException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* IncludeStructmemberH.proto */ -#include <structmember.h> - -/* FixUpExtensionType.proto */ -#if CYTHON_USE_TYPE_SPECS -static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type); -#endif - -/* FetchCommonType.proto */ -#if !CYTHON_USE_TYPE_SPECS -static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); -#else -static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases); -#endif - -/* PyMethodNew.proto */ -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { - CYTHON_UNUSED_VAR(typ); - if (!self) - return __Pyx_NewRef(func); - return PyMethod_New(func, self); -} -#else - #define __Pyx_PyMethod_New PyMethod_New -#endif - -/* PyVectorcallFastCallDict.proto */ -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw); -#endif - -/* CythonFunctionShared.proto */ -#define __Pyx_CyFunction_USED -#define __Pyx_CYFUNCTION_STATICMETHOD 0x01 -#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 -#define __Pyx_CYFUNCTION_CCLASS 0x04 -#define __Pyx_CYFUNCTION_COROUTINE 0x08 -#define __Pyx_CyFunction_GetClosure(f)\ - (((__pyx_CyFunctionObject *) (f))->func_closure) -#if PY_VERSION_HEX < 0x030900B1 - #define __Pyx_CyFunction_GetClassObj(f)\ - (((__pyx_CyFunctionObject *) (f))->func_classobj) -#else - #define __Pyx_CyFunction_GetClassObj(f)\ - ((PyObject*) ((PyCMethodObject *) (f))->mm_class) -#endif
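
For reference, the closure scope structs declared earlier in this hunk wrap the locals of small helpers from the original pdf_toolbox/lib/dia_yolov5/utils/general.py. A minimal Python sketch, assembled only from the source lines quoted in the generated comments above (so these are the quoted one-line bodies, not necessarily the full upstream implementations):

import math

def intersect_dicts(da, db, exclude=()):
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    return {k: v for k, v in da.items()
            if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}

def one_cycle(y1=0.0, y2=1.0, steps=100):
    # Sinusoidal ramp from y1 to y2 over `steps` (https://arxiv.org/pdf/1812.01187.pdf)
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

# e.g. one_cycle(0.1, 1.0, 100)(50) == 0.55 -- halfway up the ramp

The `genexpr` scope structs are the heap-allocated closures Cython creates for the generator expressions inside these helpers (capturing loop variables such as `k`, `v`, `x`, `f`); each one's `__pyx_outer_scope` points back at the enclosing function's scope struct.
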
-#define __Pyx_CyFunction_SetClassObj(f, classobj)\ - __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj)) -#define __Pyx_CyFunction_Defaults(type, f)\ - ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) -#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ - ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) -typedef struct { -#if PY_VERSION_HEX < 0x030900B1 - PyCFunctionObject func; -#else - PyCMethodObject func; -#endif -#if CYTHON_BACKPORT_VECTORCALL - __pyx_vectorcallfunc func_vectorcall; -#endif -#if PY_VERSION_HEX < 0x030500A0 - PyObject *func_weakreflist; -#endif - PyObject *func_dict; - PyObject *func_name; - PyObject *func_qualname; - PyObject *func_doc; - PyObject *func_globals; - PyObject *func_code; - PyObject *func_closure; -#if PY_VERSION_HEX < 0x030900B1 - PyObject *func_classobj; -#endif - void *defaults; - int defaults_pyobjects; - size_t defaults_size; // used by FusedFunction for copying defaults - int flags; - PyObject *defaults_tuple; - PyObject *defaults_kwdict; - PyObject *(*defaults_getter)(PyObject *); - PyObject *func_annotations; - PyObject *func_is_coroutine; -} __pyx_CyFunctionObject; -#if !CYTHON_USE_MODULE_STATE -static PyTypeObject *__pyx_CyFunctionType = 0; -#endif -#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType) -#define __Pyx_IsCyOrPyCFunction(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type) -#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType) -static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, - int flags, PyObject* qualname, - PyObject *closure, - PyObject *module, PyObject *globals, - PyObject* code); -static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj); -static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, - size_t size, - int pyobjects); -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, - PyObject *tuple); -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, - PyObject *dict); -static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, - PyObject *dict); -static int __pyx_CyFunction_init(PyObject *module); -#if CYTHON_METH_FASTCALL -static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -#if CYTHON_BACKPORT_VECTORCALL -#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall) -#else -#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall) -#endif -#endif - -/* CythonFunction.proto */ -static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, - int flags, PyObject* qualname, - PyObject *closure, - PyObject *module, PyObject *globals, - PyObject* code); - -/* CallableCheck.proto */ -#if CYTHON_USE_TYPE_SLOTS && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyCallable_Check(obj) (Py_TYPE(obj)->tp_call != NULL) -#else -#define __Pyx_PyCallable_Check(obj) PyCallable_Check(obj) -#endif - -/* GetAttr.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, 
PyObject *); - -/* ListCompAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len)) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) -#endif - -/* IterFinish.proto */ -static CYTHON_INLINE int __Pyx_IterFinish(void); - -/* PyObjectCallNoArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); - -/* PyObjectGetMethod.proto */ -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); - -/* PyObjectCallMethod0.proto */ -static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); - -/* RaiseNeedMoreValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/* RaiseTooManyValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/* UnpackItemEndCheck.proto */ -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); - -/* RaiseNoneIterError.proto */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -/* UnpackTupleError.proto */ -static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); - -/* UnpackTuple2.proto */ -#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple)\ - (likely(is_tuple || PyTuple_Check(tuple)) ?\ - (likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ?\ - __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) :\ - (__Pyx_UnpackTupleError(tuple, 2), -1)) :\ - __Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple)) -static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( - PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple); -static int __Pyx_unpack_tuple2_generic( - PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple); - -/* dict_iter.proto */ -static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name, - Py_ssize_t* p_orig_length, int* p_is_dict); -static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos, - PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict); - -/* PyObjectFormatSimple.proto */ -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyObject_FormatSimple(s, f) (\ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ - PyObject_Format(s, f)) -#elif PY_MAJOR_VERSION < 3 - #define __Pyx_PyObject_FormatSimple(s, f) (\ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ - likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") :\ - PyObject_Format(s, f)) -#elif CYTHON_USE_TYPE_SLOTS - #define __Pyx_PyObject_FormatSimple(s, f) (\ - likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ - likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_repr(s) :\ - likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_repr(s) :\ - PyObject_Format(s, f)) -#else - #define __Pyx_PyObject_FormatSimple(s, f) (\ - likely(PyUnicode_CheckExact(s)) ? 
(Py_INCREF(s), s) :\ - PyObject_Format(s, f)) -#endif - -/* JoinPyUnicode.proto */ -static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, - Py_UCS4 max_char); - -/* pep479.proto */ -static void __Pyx_Generator_Replace_StopIteration(int in_async_gen); - -/* UnicodeConcatInPlace.proto */ -# if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 - #if CYTHON_REFNANNY - #define __Pyx_PyUnicode_ConcatInPlace(left, right) __Pyx_PyUnicode_ConcatInPlaceImpl(&left, right, __pyx_refnanny) - #else - #define __Pyx_PyUnicode_ConcatInPlace(left, right) __Pyx_PyUnicode_ConcatInPlaceImpl(&left, right) - #endif - static CYTHON_INLINE PyObject *__Pyx_PyUnicode_ConcatInPlaceImpl(PyObject **p_left, PyObject *right - #if CYTHON_REFNANNY - , void* __pyx_refnanny - #endif - ); -#else -#define __Pyx_PyUnicode_ConcatInPlace __Pyx_PyUnicode_Concat -#endif -#define __Pyx_PyUnicode_ConcatInPlaceSafe(left, right) ((unlikely((left) == Py_None) || unlikely((right) == Py_None)) ?\ - PyNumber_InPlaceAdd(left, right) : __Pyx_PyUnicode_ConcatInPlace(left, right)) - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* ImportDottedModule.proto */ -static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple); - -/* PyObjectSetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) -static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); -#else -#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) -#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) -#endif - -/* PySequenceContains.proto */ -static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { - int result = PySequence_Contains(seq, item); - return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); -} - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* dict_getitem_default.proto */ -static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value); - -/* UnpackUnboundCMethod.proto */ -typedef struct { - PyObject *type; - PyObject **method_name; - PyCFunction func; - PyObject *method; - int flag; -} __Pyx_CachedCFunction; - -/* CallUnboundCMethod1.proto */ -static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg); -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg); -#else -#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg) -#endif - -/* CallUnboundCMethod2.proto */ -static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2); -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1 -static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2); -#else -#define __Pyx_CallUnboundCMethod2(cfunc, self, arg1, arg2) __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2) -#endif - -/* PyObjectLookupSpecial.proto */ -#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS -#define __Pyx_PyObject_LookupSpecialNoError(obj, attr_name) __Pyx__PyObject_LookupSpecial(obj, attr_name, 0) -#define __Pyx_PyObject_LookupSpecial(obj, attr_name) __Pyx__PyObject_LookupSpecial(obj, attr_name, 1) -static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error); -#else -#define __Pyx_PyObject_LookupSpecialNoError(o,n) __Pyx_PyObject_GetAttrStrNoError(o,n) -#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n) -#endif - -/* PyObject_Str.proto */ -#define __Pyx_PyObject_Str(obj)\ - (likely(PyString_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PyObject_Str(obj)) - -/* PyFloatBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyFloat_TrueDivideObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyFloat_TrueDivideObjC(op1, op2, floatval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceTrueDivide(op1, op2) : PyNumber_TrueDivide(op1, op2)) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_TrueDivideObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_TrueDivideObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceTrueDivide(op1, op2) : PyNumber_TrueDivide(op1, op2)) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_SubtractCObj(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_SubtractCObj(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) -#endif - -/* DictGetItem.proto */ -#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY -static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); -#define __Pyx_PyObject_Dict_GetItem(obj, name)\ - (likely(PyDict_CheckExact(obj)) ?\ - __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) -#else -#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) -#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) -#endif - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* SliceObject.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( - PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** py_start, PyObject** py_stop, PyObject** py_slice, - int has_cstart, int has_cstop, int wraparound); - -/* ValidateBasesTuple.proto */ -#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS -static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases); -#endif - -/* PyType_Ready.proto */ -static CYTHON_UNUSED int __Pyx_PyType_Ready(PyTypeObject *t); - -/* PyObject_GenericGetAttrNoDict.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr -#endif - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -#if !CYTHON_COMPILING_IN_LIMITED_API -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); -#endif - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -/* GCCDiagnostics.proto */ -#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) -#define __Pyx_HAS_GCC_DIAGNOSTIC -#endif - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* FormatTypeName.proto */ -#if CYTHON_COMPILING_IN_LIMITED_API -typedef PyObject *__Pyx_TypeName; -#define __Pyx_FMT_TYPENAME "%U" -static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp); -#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj) -#else -typedef const char *__Pyx_TypeName; -#define __Pyx_FMT_TYPENAME "%.200s" -#define __Pyx_PyType_GetName(tp) ((tp)->tp_name) -#define __Pyx_DECREF_TypeName(obj) -#endif - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2)) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_Occurred(), err1, err2) -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -/* PyObjectCall2Args.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); - -/* PyObjectCallMethod1.proto */ -static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); - -/* CoroutineBase.proto */ -struct __pyx_CoroutineObject; -typedef PyObject *(*__pyx_coroutine_body_t)(struct __pyx_CoroutineObject *, PyThreadState *, PyObject *); -#if CYTHON_USE_EXC_INFO_STACK -#define __Pyx_ExcInfoStruct _PyErr_StackItem -#else -typedef struct { - PyObject 
*exc_type; - PyObject *exc_value; - PyObject *exc_traceback; -} __Pyx_ExcInfoStruct; -#endif -typedef struct __pyx_CoroutineObject { - PyObject_HEAD - __pyx_coroutine_body_t body; - PyObject *closure; - __Pyx_ExcInfoStruct gi_exc_state; - PyObject *gi_weakreflist; - PyObject *classobj; - PyObject *yieldfrom; - PyObject *gi_name; - PyObject *gi_qualname; - PyObject *gi_modulename; - PyObject *gi_code; - PyObject *gi_frame; - int resume_label; - char is_running; -} __pyx_CoroutineObject; -static __pyx_CoroutineObject *__Pyx__Coroutine_New( - PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name); -static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( - __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name); -static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self); -static int __Pyx_Coroutine_clear(PyObject *self); -static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value); -static PyObject *__Pyx_Coroutine_Close(PyObject *self); -static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args); -#if CYTHON_USE_EXC_INFO_STACK -#define __Pyx_Coroutine_SwapException(self) -#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state) -#else -#define __Pyx_Coroutine_SwapException(self) {\ - __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback);\ - __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state);\ - } -#define __Pyx_Coroutine_ResetAndClearException(self) {\ - __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback);\ - (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL;\ - } -#endif -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyGen_FetchStopIterationValue(pvalue)\ - __Pyx_PyGen__FetchStopIterationValue(__pyx_tstate, pvalue) -#else -#define __Pyx_PyGen_FetchStopIterationValue(pvalue)\ - __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue) -#endif -static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue); -static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); - -/* PatchModuleWithCoroutine.proto */ -static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code); - -/* PatchGeneratorABC.proto */ -static int __Pyx_patch_abc(void); - -/* Generator.proto */ -#define __Pyx_Generator_USED -static PyTypeObject *__pyx_GeneratorType = 0; -#define __Pyx_Generator_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_GeneratorType) -#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name)\ - __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name) -static PyObject *__Pyx_Generator_Next(PyObject *self); -static int __pyx_Generator_init(PyObject *module); - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -#if CYTHON_COMPILING_IN_LIMITED_API -static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str); -#else -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); -#endif - -/* #### Code section: module_declarations ### */ - -/* Module declarations from "pdf_toolbox.lib.dia_yolov5.utils.general" */ -#if 
!CYTHON_USE_MODULE_STATE -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr = 0; -static PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr = 0; -#endif -/* #### Code section: typeinfo ### */ -/* #### Code section: before_global_var ### */ -#define __Pyx_MODULE_NAME "pdf_toolbox.lib.dia_yolov5.utils.general" -extern int __pyx_module_is_main_pdf_toolbox__lib__dia_yolov5__utils__general; -int __pyx_module_is_main_pdf_toolbox__lib__dia_yolov5__utils__general = 0; - -/* Implementation of "pdf_toolbox.lib.dia_yolov5.utils.general" */ -/* #### Code section: global_var ### */ -static PyObject *__pyx_builtin_print; -static PyObject *__pyx_builtin_vars; -static PyObject *__pyx_builtin_max; -static PyObject *__pyx_builtin_open; -static PyObject *__pyx_builtin_OSError; -static PyObject *__pyx_builtin_sum; -static PyObject *__pyx_builtin_AssertionError; -static PyObject *__pyx_builtin_enumerate; -static PyObject *__pyx_builtin_range; -/* #### Code section: string_decls ### */ -static const char __pyx_k_T[] = "T"; -static const char __pyx_k_c[] = "c"; -static const char __pyx_k_d[] = "(\\d+)"; -static const char __pyx_k_e[] = "e"; -static const char __pyx_k_f[] = "f"; -static const char __pyx_k_h[] = "h"; -static const char __pyx_k_i[] = "i"; -static const char __pyx_k_j[] = "j"; -static const char __pyx_k_k[] = "k"; -static const char __pyx_k_l[] = "l"; -static const char __pyx_k_m[] = "m"; -static const char __pyx_k_n[] = "n"; -static const char __pyx_k_s[] = "s"; -static const char __pyx_k_t[] = "t"; -static const char __pyx_k_v[] = "v"; -static const char __pyx_k_w[] = "w"; -static const char __pyx_k_x[] = "x"; -static const char __pyx_k_y[] = "y"; -static const char __pyx_k_0m[] = "\033[0m"; -static const char __pyx_k_1m[] = "\033[1m"; -static const char __pyx_k_4m[] = "\033[4m"; -static const char __pyx_k__4[] = "__"; -static const char __pyx_k__5[] = "="; -static const char __pyx_k__6[] = ": "; -static const char __pyx_k__7[] = ", "; -static const char __pyx_k__9[] = "*"; -static const char __pyx_k_da[] = "da"; -static const char __pyx_k_db[] = "db"; -static const char __pyx_k_gc[] = "gc"; -static const char __pyx_k_mm[] = "mm"; -static const char __pyx_k_nc[] = "nc"; -static const char __pyx_k_np[] = "np"; -static const char __pyx_k_os[] = "os"; -static const char __pyx_k_pd[] = "pd"; -static const char __pyx_k_pi[] = "pi"; -static const char __pyx_k_re[] = "re"; -static const char __pyx_k_xc[] = "xc"; -static const char __pyx_k_xi[] = "xi"; -static const 
char __pyx_k_y1[] = "y1"; -static const char __pyx_k_y2[] = "y2"; -static const char __pyx_k_30m[] = "\033[30m"; -static const char __pyx_k_31m[] = "\033[31m"; -static const char __pyx_k_32m[] = "\033[32m"; -static const char __pyx_k_33m[] = "\033[33m"; -static const char __pyx_k_34m[] = "\033[34m"; -static const char __pyx_k_35m[] = "\033[35m"; -static const char __pyx_k_36m[] = "\033[36m"; -static const char __pyx_k_37m[] = "\033[37m"; -static const char __pyx_k_90m[] = "\033[90m"; -static const char __pyx_k_91m[] = "\033[91m"; -static const char __pyx_k_92m[] = "\033[92m"; -static const char __pyx_k_93m[] = "\033[93m"; -static const char __pyx_k_94m[] = "\033[94m"; -static const char __pyx_k_95m[] = "\033[95m"; -static const char __pyx_k_96m[] = "\033[96m"; -static const char __pyx_k_97m[] = "\033[97m"; -static const char __pyx_k__10[] = "."; -static const char __pyx_k__11[] = ""; -static const char __pyx_k__14[] = "\344\272\272\345\267\245\346\231\272\350\203\275"; -static const char __pyx_k__15[] = "[\344\270\200-\351\277\277]"; -static const char __pyx_k__16[] = "**/*"; -static const char __pyx_k__17[] = ":/"; -static const char __pyx_k__18[] = "://"; -static const char __pyx_k__20[] = "?"; -static const char __pyx_k__21[] = "[|@#!\302\241\302\267$\342\202\254%&()=?\302\277^*;:,\302\250\302\264><+]"; -static const char __pyx_k__22[] = "_"; -static const char __pyx_k_any[] = "any"; -static const char __pyx_k_box[] = "box"; -static const char __pyx_k_cat[] = "cat"; -static const char __pyx_k_cfg[] = "cfg"; -static const char __pyx_k_cos[] = "cos"; -static const char __pyx_k_cv2[] = "cv2"; -static const char __pyx_k_d_2[] = "d"; -static const char __pyx_k_dir[] = "dir"; -static const char __pyx_k_end[] = "end"; -static const char __pyx_k_env[] = "env"; -static const char __pyx_k_eps[] = "eps"; -static const char __pyx_k_get[] = "get"; -static const char __pyx_k_int[] = "int"; -static const char __pyx_k_iou[] = "iou"; -static const char __pyx_k_key[] = "key"; -static const char __pyx_k_max[] = "max"; -static const char __pyx_k_nms[] = "nms"; -static const char __pyx_k_ops[] = "ops"; -static const char __pyx_k_opt[] = "opt"; -static const char __pyx_k_pad[] = "pad"; -static const char __pyx_k_red[] = "red"; -static const char __pyx_k_s_2[] = "%s"; -static const char __pyx_k_sep[] = "sep"; -static const char __pyx_k_str[] = "str"; -static const char __pyx_k_sub[] = "sub"; -static const char __pyx_k_sum[] = "sum"; -static const char __pyx_k_tmp[] = "/tmp"; -static const char __pyx_k_url[] = "url"; -static const char __pyx_k_FILE[] = "FILE"; -static const char __pyx_k_INFO[] = "INFO"; -static const char __pyx_k_Path[] = "Path"; -static const char __pyx_k_RANK[] = "RANK"; -static const char __pyx_k_ROOT[] = "ROOT"; -static const char __pyx_k_R_OK[] = "R_OK"; -static const char __pyx_k_args[] = "args"; -static const char __pyx_k_blue[] = "blue"; -static const char __pyx_k_bold[] = "bold"; -static const char __pyx_k_ceil[] = "ceil"; -static const char __pyx_k_clip[] = "clip"; -static const char __pyx_k_conf[] = "conf"; -static const char __pyx_k_copy[] = "copy"; -static const char __pyx_k_cyan[] = "cyan"; -static const char __pyx_k_dirs[] = "dirs"; -static const char __pyx_k_exit[] = "__exit__"; -static const char __pyx_k_file[] = "__file__"; -static const char __pyx_k_func[] = "func"; -static const char __pyx_k_gain[] = "gain"; -static const char __pyx_k_glob[] = "glob"; -static const char __pyx_k_hard[] = "hard"; -static const char __pyx_k_home[] = "home"; -static const char __pyx_k_info[] = 
"info"; -static const char __pyx_k_long[] = "long"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_math[] = "math"; -static const char __pyx_k_name[] = "name"; -static const char __pyx_k_ones[] = "ones"; -static const char __pyx_k_open[] = "open"; -static const char __pyx_k_padh[] = "padh"; -static const char __pyx_k_padw[] = "padw"; -static const char __pyx_k_path[] = "path"; -static const char __pyx_k_rank[] = "rank"; -static const char __pyx_k_repl[] = "repl"; -static const char __pyx_k_seed[] = "seed"; -static const char __pyx_k_send[] = "send"; -static const char __pyx_k_spec[] = "__spec__"; -static const char __pyx_k_stat[] = "stat"; -static const char __pyx_k_stem[] = "stem"; -static const char __pyx_k_test[] = "test"; -static const char __pyx_k_time[] = "time"; -static const char __pyx_k_true[] = "true"; -static const char __pyx_k_vars[] = "vars"; -static const char __pyx_k_view[] = "view"; -static const char __pyx_k_yaml[] = "yaml"; -static const char __pyx_k_0_0_0[] = "0.0.0"; -static const char __pyx_k_11_5g[] = "{:11.5g}"; -static const char __pyx_k_3_6_2[] = "3.6.2"; -static const char __pyx_k_Linux[] = "Linux"; -static const char __pyx_k_NCOLS[] = "NCOLS"; -static const char __pyx_k_array[] = "array"; -static const char __pyx_k_ascii[] = "ascii"; -static const char __pyx_k_black[] = "black"; -static const char __pyx_k_boxes[] = "boxes"; -static const char __pyx_k_clamp[] = "clamp_"; -static const char __pyx_k_clone[] = "clone"; -static const char __pyx_k_close[] = "close"; -static const char __pyx_k_cudnn[] = "cudnn"; -static const char __pyx_k_enter[] = "__enter__"; -static const char __pyx_k_float[] = "float"; -static const char __pyx_k_floor[] = "floor"; -static const char __pyx_k_green[] = "green"; -static const char __pyx_k_imgsz[] = "imgsz"; -static const char __pyx_k_input[] = "input"; -static const char __pyx_k_items[] = "items"; -static const char __pyx_k_level[] = "level"; -static const char __pyx_k_lower[] = "lower"; -static const char __pyx_k_merge[] = "merge"; -static const char __pyx_k_mkdir[] = "mkdir"; -static const char __pyx_k_numpy[] = "numpy"; -static const char __pyx_k_parse[] = "parse"; -static const char __pyx_k_print[] = "print"; -static const char __pyx_k_range[] = "range"; -static const char __pyx_k_shape[] = "shape"; -static const char __pyx_k_split[] = "split"; -static const char __pyx_k_steps[] = "steps"; -static const char __pyx_k_throw[] = "throw"; -static const char __pyx_k_torch[] = "torch"; -static const char __pyx_k_white[] = "white"; -static const char __pyx_k_zeros[] = "zeros"; -static const char __pyx_k_Darwin[] = "Darwin"; -static const char __pyx_k_LOGGER[] = "LOGGER"; -static const char __pyx_k_Python[] = "Python "; -static const char __pyx_k_Tensor[] = "Tensor"; -static const char __pyx_k_access[] = "access"; -static const char __pyx_k_astype[] = "astype"; -static const char __pyx_k_colors[] = "colors"; -static const char __pyx_k_config[] = ".config"; -static const char __pyx_k_coords[] = "coords"; -static const char __pyx_k_decode[] = "decode"; -static const char __pyx_k_device[] = "device"; -static const char __pyx_k_emojis[] = "emojis"; -static const char __pyx_k_enable[] = "enable"; -static const char __pyx_k_encode[] = "encode"; -static const char __pyx_k_exists[] = "exists"; -static const char __pyx_k_file_2[] = "file"; -static const char __pyx_k_format[] = "format"; -static const char __pyx_k_getenv[] = "getenv"; -static const char __pyx_k_groups[] = "groups"; -static const char __pyx_k_ignore[] = 
"ignore"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_is_dir[] = "is_dir"; -static const char __pyx_k_kwargs[] = "kwargs"; -static const char __pyx_k_labels[] = "labels"; -static const char __pyx_k_max_wh[] = "max_wh"; -static const char __pyx_k_min_wh[] = "min_wh"; -static const char __pyx_k_name_2[] = "__name__"; -static const char __pyx_k_output[] = "output"; -static const char __pyx_k_pandas[] = "pandas"; -static const char __pyx_k_pinned[] = "pinned"; -static const char __pyx_k_random[] = "random"; -static const char __pyx_k_scores[] = "scores"; -static const char __pyx_k_search[] = "search"; -static const char __pyx_k_shutil[] = "shutil"; -static const char __pyx_k_string[] = "string"; -static const char __pyx_k_suffix[] = "suffix"; -static const char __pyx_k_system[] = "system"; -static const char __pyx_k_tensor[] = "tensor"; -static const char __pyx_k_test_2[] = "__test__"; -static const char __pyx_k_unlink[] = "unlink"; -static const char __pyx_k_urllib[] = "urllib"; -static const char __pyx_k_xyn2xy[] = "xyn2xy"; -static const char __pyx_k_yellow[] = "yellow"; -static const char __pyx_k_yolov5[] = "yolov5"; -static const char __pyx_k_OSError[] = "OSError"; -static const char __pyx_k_VERBOSE[] = "VERBOSE"; -static const char __pyx_k_WARNING[] = "WARNING"; -static const char __pyx_k_Windows[] = "Windows"; -static const char __pyx_k_argsort[] = "argsort"; -static const char __pyx_k_box_iou[] = "box_iou"; -static const char __pyx_k_classes[] = "classes"; -static const char __pyx_k_columns[] = "columns"; -static const char __pyx_k_current[] = "current"; -static const char __pyx_k_disable[] = "disable"; -static const char __pyx_k_display[] = "display"; -static const char __pyx_k_divisor[] = "divisor"; -static const char __pyx_k_env_var[] = "env_var"; -static const char __pyx_k_environ[] = "environ"; -static const char __pyx_k_exclude[] = "exclude"; -static const char __pyx_k_fitness[] = "fitness"; -static const char __pyx_k_genexpr[] = "genexpr"; -static const char __pyx_k_handler[] = "handler"; -static const char __pyx_k_is_file[] = "is_file"; -static const char __pyx_k_keepdim[] = "keepdim"; -static const char __pyx_k_last_pt[] = "/**/last*.pt"; -static const char __pyx_k_logging[] = "logging"; -static const char __pyx_k_magenta[] = "magenta"; -static const char __pyx_k_matches[] = "matches"; -static const char __pyx_k_max_det[] = "max_det"; -static const char __pyx_k_max_nms[] = "max_nms"; -static const char __pyx_k_methods[] = "methods"; -static const char __pyx_k_minimum[] = "minimum"; -static const char __pyx_k_nonzero[] = "nonzero"; -static const char __pyx_k_options[] = "options"; -static const char __pyx_k_parents[] = "parents"; -static const char __pyx_k_pathlib[] = "pathlib"; -static const char __pyx_k_pattern[] = "pattern"; -static const char __pyx_k_profile[] = "profile"; -static const char __pyx_k_replace[] = "replace"; -static const char __pyx_k_reshape[] = "reshape"; -static const char __pyx_k_resolve[] = "resolve"; -static const char __pyx_k_st_size[] = "st_size"; -static const char __pyx_k_tmp_txt[] = "tmp.txt"; -static const char __pyx_k_unquote[] = "unquote"; -static const char __pyx_k_verbose[] = "verbose"; -static const char __pyx_k_version[] = "version "; -static const char __pyx_k_warning[] = "warning"; -static const char __pyx_k_weights[] = "weights"; -static const char __pyx_k_agnostic[] = "agnostic"; -static const char __pyx_k_as_tuple[] = "as_tuple"; -static const char __pyx_k_backends[] = "backends"; -static const char 
__pyx_k_bincount[] = "bincount"; -static const char __pyx_k_colorstr[] = "colorstr"; -static const char __pyx_k_exist_ok[] = "exist_ok"; -static const char __pyx_k_getctime[] = "getctime"; -static const char __pyx_k_instance[] = "instance"; -static const char __pyx_k_is_ascii[] = "is_ascii"; -static const char __pyx_k_new_size[] = "new_size"; -static const char __pyx_k_platform[] = "platform"; -static const char __pyx_k_url2file[] = "url2file"; -static const char __pyx_k_benchmark[] = "benchmark"; -static const char __pyx_k_clean_str[] = "clean_str"; -static const char __pyx_k_cpu_count[] = "cpu_count"; -static const char __pyx_k_enumerate[] = "enumerate"; -static const char __pyx_k_file_size[] = "file_size"; -static const char __pyx_k_formatter[] = "formatter"; -static const char __pyx_k_getLogger[] = "getLogger"; -static const char __pyx_k_iou_thres[] = "iou_thres"; -static const char __pyx_k_isenabled[] = "isenabled"; -static const char __pyx_k_last_list[] = "last_list"; -static const char __pyx_k_linewidth[] = "linewidth"; -static const char __pyx_k_message_s[] = "%(message)s"; -static const char __pyx_k_minlength[] = "minlength"; -static const char __pyx_k_one_cycle[] = "one_cycle"; -static const char __pyx_k_precision[] = "precision"; -static const char __pyx_k_ratio_pad[] = "ratio_pad"; -static const char __pyx_k_recursive[] = "recursive"; -static const char __pyx_k_redundant[] = "redundant"; -static const char __pyx_k_underline[] = "underline"; -static const char __pyx_k_xywh2xyxy[] = "xywh2xyxy"; -static const char __pyx_k_xyxy2xywh[] = "xyxy2xywh"; -static const char __pyx_k_bright_red[] = "bright_red"; -static const char __pyx_k_conf_thres[] = "conf_thres"; -static const char __pyx_k_descending[] = "descending"; -static const char __pyx_k_float_kind[] = "float_kind"; -static const char __pyx_k_from_numpy[] = "from_numpy"; -static const char __pyx_k_img0_shape[] = "img0_shape"; -static const char __pyx_k_img1_shape[] = "img1_shape"; -static const char __pyx_k_init_seeds[] = "init_seeds"; -static const char __pyx_k_is_chinese[] = "is_chinese"; -static const char __pyx_k_prediction[] = "prediction"; -static const char __pyx_k_print_args[] = "print_args"; -static const char __pyx_k_s_exceeded[] = "s exceeded"; -static const char __pyx_k_search_dir[] = "search_dir"; -static const char __pyx_k_startswith[] = "startswith"; -static const char __pyx_k_time_limit[] = "time_limit"; -static const char __pyx_k_try_except[] = "try_except"; -static const char __pyx_k_xywhn2xyxy[] = "xywhn2xyxy"; -static const char __pyx_k_xyxy2xywhn[] = "xyxy2xywhn"; -static const char __pyx_k_Invalid_IoU[] = "Invalid IoU "; -static const char __pyx_k_NUM_THREADS[] = "NUM_THREADS"; -static const char __pyx_k_Ultralytics[] = "Ultralytics"; -static const char __pyx_k_basicConfig[] = "basicConfig"; -static const char __pyx_k_bright_blue[] = "bright_blue"; -static const char __pyx_k_bright_cyan[] = "bright_cyan"; -static const char __pyx_k_clip_coords[] = "clip_coords"; -static const char __pyx_k_concatenate[] = "concatenate"; -static const char __pyx_k_manual_seed[] = "manual_seed"; -static const char __pyx_k_max_columns[] = "max_columns"; -static const char __pyx_k_multi_label[] = "multi_label"; -static const char __pyx_k_set_logging[] = "set_logging"; -static const char __pyx_k_torchvision[] = "torchvision"; -static const char __pyx_k_updating_to[] = ", updating to "; -static const char __pyx_k_with_suffix[] = "with_suffix"; -static const char __pyx_k_bright_black[] = "bright_black"; -static const char 
__pyx_k_bright_green[] = "bright_green"; -static const char __pyx_k_bright_white[] = "bright_white"; -static const char __pyx_k_check_python[] = "check_python"; -static const char __pyx_k_class_counts[] = "class_counts"; -static const char __pyx_k_initializing[] = "_initializing"; -static const char __pyx_k_is_coroutine[] = "_is_coroutine"; -static const char __pyx_k_is_writeable[] = "is_writeable"; -static const char __pyx_k_scale_coords[] = "scale_coords"; -static const char __pyx_k_General_utils[] = "\nGeneral utils\n"; -static const char __pyx_k_bright_yellow[] = "bright_yellow"; -static const char __pyx_k_check_version[] = "check_version"; -static const char __pyx_k_class_getitem[] = "__class_getitem__"; -static const char __pyx_k_class_weights[] = "class_weights"; -static const char __pyx_k_deterministic[] = "deterministic"; -static const char __pyx_k_image_weights[] = "image_weights"; -static const char __pyx_k_setNumThreads[] = "setNumThreads"; -static const char __pyx_k_AssertionError[] = "AssertionError"; -static const char __pyx_k_YOLOv5_VERBOSE[] = "YOLOv5_VERBOSE"; -static const char __pyx_k_bright_magenta[] = "bright_magenta"; -static const char __pyx_k_check_img_size[] = "check_img_size"; -static const char __pyx_k_get_latest_run[] = "get_latest_run"; -static const char __pyx_k_increment_path[] = "increment_path"; -static const char __pyx_k_make_divisible[] = "make_divisible"; -static const char __pyx_k_python_version[] = "python_version"; -static const char __pyx_k_AppData_Roaming[] = "AppData/Roaming"; -static const char __pyx_k_intersect_dicts[] = "intersect_dicts"; -static const char __pyx_k_user_config_dir[] = "user_config_dir"; -static const char __pyx_k_WARNING_img_size[] = "WARNING: --img-size "; -static const char __pyx_k_set_printoptions[] = "set_printoptions"; -static const char __pyx_k_YOLOV5_CONFIG_DIR[] = "YOLOV5_CONFIG_DIR"; -static const char __pyx_k_get_terminal_size[] = "get_terminal_size"; -static const char __pyx_k_asyncio_coroutines[] = "asyncio.coroutines"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_NUMEXPR_MAX_THREADS[] = "NUMEXPR_MAX_THREADS"; -static const char __pyx_k_non_max_suppression[] = "non_max_suppression"; -static const char __pyx_k_torch_backends_cudnn[] = "torch.backends.cudnn"; -static const char __pyx_k_WARNING_NMS_time_limit[] = "WARNING: NMS time limit "; -static const char __pyx_k_colorstr_locals_genexpr[] = "colorstr.<locals>.genexpr"; -static const char __pyx_k_labels_to_class_weights[] = "labels_to_class_weights"; -static const char __pyx_k_labels_to_image_weights[] = "labels_to_image_weights"; -static const char __pyx_k_one_cycle_locals_lambda[] = "one_cycle.<locals>.<lambda>"; -static const char __pyx_k_file_size_locals_genexpr[] = "file_size.<locals>.genexpr"; -static const char __pyx_k_print_args_locals_genexpr[] = "print_args.<locals>.genexpr"; -static const char __pyx_k_try_except_locals_handler[] = "try_except.<locals>.handler"; -static const char __pyx_k_Library_Application_Support[] = "Library/Application Support"; -static const char __pyx_k_Invalid_Confidence_threshold[] = "Invalid Confidence threshold "; -static const char __pyx_k_intersect_dicts_locals_genexpr[] = "intersect_dicts.<locals>.genexpr"; -static const char __pyx_k_must_be_multiple_of_max_stride[] = " must be multiple of max stride "; -static const char __pyx_k_valid_values_are_between_0_0_an[] = ", valid values are between 0.0 and 1.0"; -static const char __pyx_k_pdf_toolbox_lib_dia_yolov5_utils[] = "pdf_toolbox\\lib\\dia_yolov5\\utils\\general.py"; -static
const char __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2[] = "pdf_toolbox.lib.dia_yolov5.utils.general"; -static const char __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3[] = "pdf_toolbox.lib.dia_yolov5.utils.metrics"; -#if !CYTHON_USE_MODULE_STATE -static PyObject *__pyx_kp_u_0_0_0; -static PyObject *__pyx_kp_u_0m; -static PyObject *__pyx_kp_u_11_5g; -static PyObject *__pyx_kp_u_1m; -static PyObject *__pyx_kp_u_30m; -static PyObject *__pyx_kp_u_31m; -static PyObject *__pyx_kp_u_32m; -static PyObject *__pyx_kp_u_33m; -static PyObject *__pyx_kp_u_34m; -static PyObject *__pyx_kp_u_35m; -static PyObject *__pyx_kp_u_36m; -static PyObject *__pyx_kp_u_37m; -static PyObject *__pyx_kp_u_3_6_2; -static PyObject *__pyx_kp_u_4m; -static PyObject *__pyx_kp_u_90m; -static PyObject *__pyx_kp_u_91m; -static PyObject *__pyx_kp_u_92m; -static PyObject *__pyx_kp_u_93m; -static PyObject *__pyx_kp_u_94m; -static PyObject *__pyx_kp_u_95m; -static PyObject *__pyx_kp_u_96m; -static PyObject *__pyx_kp_u_97m; -static PyObject *__pyx_kp_u_AppData_Roaming; -static PyObject *__pyx_n_s_AssertionError; -static PyObject *__pyx_n_u_Darwin; -static PyObject *__pyx_n_s_FILE; -static PyObject *__pyx_n_s_INFO; -static PyObject *__pyx_kp_u_Invalid_Confidence_threshold; -static PyObject *__pyx_kp_u_Invalid_IoU; -static PyObject *__pyx_n_s_LOGGER; -static PyObject *__pyx_kp_u_Library_Application_Support; -static PyObject *__pyx_n_u_Linux; -static PyObject *__pyx_n_s_NCOLS; -static PyObject *__pyx_n_u_NUMEXPR_MAX_THREADS; -static PyObject *__pyx_n_s_NUM_THREADS; -static PyObject *__pyx_n_s_OSError; -static PyObject *__pyx_n_s_Path; -static PyObject *__pyx_kp_u_Python; -static PyObject *__pyx_n_u_RANK; -static PyObject *__pyx_n_s_ROOT; -static PyObject *__pyx_n_s_R_OK; -static PyObject *__pyx_n_s_T; -static PyObject *__pyx_n_s_Tensor; -static PyObject *__pyx_n_u_Ultralytics; -static PyObject *__pyx_n_s_VERBOSE; -static PyObject *__pyx_n_s_WARNING; -static PyObject *__pyx_kp_u_WARNING_NMS_time_limit; -static PyObject *__pyx_kp_u_WARNING_img_size; -static PyObject *__pyx_n_u_Windows; -static PyObject *__pyx_n_u_YOLOV5_CONFIG_DIR; -static PyObject *__pyx_n_u_YOLOv5_VERBOSE; -static PyObject *__pyx_kp_u__10; -static PyObject *__pyx_kp_u__11; -static PyObject *__pyx_n_u__14; -static PyObject *__pyx_kp_u__15; -static PyObject *__pyx_kp_u__16; -static PyObject *__pyx_kp_u__17; -static PyObject *__pyx_kp_u__18; -static PyObject *__pyx_n_s__20; -static PyObject *__pyx_kp_u__20; -static PyObject *__pyx_kp_u__21; -static PyObject *__pyx_n_u__22; -static PyObject *__pyx_n_u__4; -static PyObject *__pyx_kp_u__5; -static PyObject *__pyx_kp_u__6; -static PyObject *__pyx_kp_u__7; -static PyObject *__pyx_n_s__9; -static PyObject *__pyx_kp_u__9; -static PyObject *__pyx_n_s_access; -static PyObject *__pyx_n_s_agnostic; -static PyObject *__pyx_n_s_any; -static PyObject *__pyx_n_s_args; -static PyObject *__pyx_n_s_argsort; -static PyObject *__pyx_n_s_array; -static PyObject *__pyx_n_s_as_tuple; -static PyObject *__pyx_n_u_ascii; -static PyObject *__pyx_n_s_astype; -static PyObject *__pyx_n_s_asyncio_coroutines; -static PyObject *__pyx_n_s_backends; -static PyObject *__pyx_n_s_basicConfig; -static PyObject *__pyx_n_s_benchmark; -static PyObject *__pyx_n_s_bincount; -static PyObject *__pyx_n_u_black; -static PyObject *__pyx_n_u_blue; -static PyObject *__pyx_n_u_bold; -static PyObject *__pyx_n_s_box; -static PyObject *__pyx_n_s_box_iou; -static PyObject *__pyx_n_s_boxes; -static PyObject *__pyx_n_u_bright_black; -static PyObject *__pyx_n_u_bright_blue; -static 
PyObject *__pyx_n_u_bright_cyan; -static PyObject *__pyx_n_u_bright_green; -static PyObject *__pyx_n_u_bright_magenta; -static PyObject *__pyx_n_u_bright_red; -static PyObject *__pyx_n_u_bright_white; -static PyObject *__pyx_n_u_bright_yellow; -static PyObject *__pyx_n_s_c; -static PyObject *__pyx_n_s_cat; -static PyObject *__pyx_n_s_ceil; -static PyObject *__pyx_n_s_cfg; -static PyObject *__pyx_n_s_check_img_size; -static PyObject *__pyx_n_s_check_python; -static PyObject *__pyx_n_s_check_version; -static PyObject *__pyx_n_s_clamp; -static PyObject *__pyx_n_s_class_counts; -static PyObject *__pyx_n_s_class_getitem; -static PyObject *__pyx_n_s_class_weights; -static PyObject *__pyx_n_s_classes; -static PyObject *__pyx_n_s_clean_str; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_n_s_clip; -static PyObject *__pyx_n_s_clip_coords; -static PyObject *__pyx_n_s_clone; -static PyObject *__pyx_n_s_close; -static PyObject *__pyx_n_s_colors; -static PyObject *__pyx_n_s_colorstr; -static PyObject *__pyx_n_s_colorstr_locals_genexpr; -static PyObject *__pyx_n_s_columns; -static PyObject *__pyx_n_s_concatenate; -static PyObject *__pyx_n_s_conf; -static PyObject *__pyx_n_s_conf_thres; -static PyObject *__pyx_kp_u_config; -static PyObject *__pyx_n_s_coords; -static PyObject *__pyx_n_s_copy; -static PyObject *__pyx_n_s_cos; -static PyObject *__pyx_n_s_cpu_count; -static PyObject *__pyx_n_s_cudnn; -static PyObject *__pyx_n_s_current; -static PyObject *__pyx_n_s_cv2; -static PyObject *__pyx_n_u_cyan; -static PyObject *__pyx_kp_u_d; -static PyObject *__pyx_n_s_d_2; -static PyObject *__pyx_n_s_da; -static PyObject *__pyx_n_s_db; -static PyObject *__pyx_n_s_decode; -static PyObject *__pyx_n_s_descending; -static PyObject *__pyx_n_s_deterministic; -static PyObject *__pyx_n_s_device; -static PyObject *__pyx_n_s_dir; -static PyObject *__pyx_n_s_dirs; -static PyObject *__pyx_kp_u_disable; -static PyObject *__pyx_n_s_display; -static PyObject *__pyx_n_s_divisor; -static PyObject *__pyx_n_s_e; -static PyObject *__pyx_n_s_emojis; -static PyObject *__pyx_kp_u_enable; -static PyObject *__pyx_n_s_encode; -static PyObject *__pyx_n_u_end; -static PyObject *__pyx_n_s_enter; -static PyObject *__pyx_n_s_enumerate; -static PyObject *__pyx_n_s_env; -static PyObject *__pyx_n_s_env_var; -static PyObject *__pyx_n_s_environ; -static PyObject *__pyx_n_s_eps; -static PyObject *__pyx_n_s_exclude; -static PyObject *__pyx_n_s_exist_ok; -static PyObject *__pyx_n_s_exists; -static PyObject *__pyx_n_s_exit; -static PyObject *__pyx_n_s_f; -static PyObject *__pyx_n_s_file; -static PyObject *__pyx_n_s_file_2; -static PyObject *__pyx_n_s_file_size; -static PyObject *__pyx_n_s_file_size_locals_genexpr; -static PyObject *__pyx_n_s_fitness; -static PyObject *__pyx_n_s_float; -static PyObject *__pyx_n_u_float_kind; -static PyObject *__pyx_n_s_floor; -static PyObject *__pyx_n_s_format; -static PyObject *__pyx_n_s_formatter; -static PyObject *__pyx_n_s_from_numpy; -static PyObject *__pyx_n_s_func; -static PyObject *__pyx_n_s_gain; -static PyObject *__pyx_kp_u_gc; -static PyObject *__pyx_n_s_genexpr; -static PyObject *__pyx_n_s_get; -static PyObject *__pyx_n_s_getLogger; -static PyObject *__pyx_n_s_get_latest_run; -static PyObject *__pyx_n_s_get_terminal_size; -static PyObject *__pyx_n_s_getctime; -static PyObject *__pyx_n_s_getenv; -static PyObject *__pyx_n_s_glob; -static PyObject *__pyx_n_u_green; -static PyObject *__pyx_n_s_groups; -static PyObject *__pyx_n_s_h; -static PyObject *__pyx_n_s_handler; -static PyObject 
*__pyx_n_s_hard; -static PyObject *__pyx_n_s_home; -static PyObject *__pyx_n_s_i; -static PyObject *__pyx_n_u_ignore; -static PyObject *__pyx_n_s_image_weights; -static PyObject *__pyx_n_s_img0_shape; -static PyObject *__pyx_n_s_img1_shape; -static PyObject *__pyx_n_s_imgsz; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_increment_path; -static PyObject *__pyx_n_s_info; -static PyObject *__pyx_n_s_init_seeds; -static PyObject *__pyx_n_s_initializing; -static PyObject *__pyx_n_s_input; -static PyObject *__pyx_n_s_instance; -static PyObject *__pyx_n_s_int; -static PyObject *__pyx_n_s_intersect_dicts; -static PyObject *__pyx_n_s_intersect_dicts_locals_genexpr; -static PyObject *__pyx_n_s_iou; -static PyObject *__pyx_n_s_iou_thres; -static PyObject *__pyx_n_s_is_ascii; -static PyObject *__pyx_n_s_is_chinese; -static PyObject *__pyx_n_s_is_coroutine; -static PyObject *__pyx_n_s_is_dir; -static PyObject *__pyx_n_s_is_file; -static PyObject *__pyx_n_s_is_writeable; -static PyObject *__pyx_kp_u_isenabled; -static PyObject *__pyx_n_s_items; -static PyObject *__pyx_n_s_j; -static PyObject *__pyx_n_s_k; -static PyObject *__pyx_n_s_keepdim; -static PyObject *__pyx_n_s_key; -static PyObject *__pyx_n_s_kwargs; -static PyObject *__pyx_n_s_l; -static PyObject *__pyx_n_s_labels; -static PyObject *__pyx_n_s_labels_to_class_weights; -static PyObject *__pyx_n_s_labels_to_image_weights; -static PyObject *__pyx_n_s_last_list; -static PyObject *__pyx_kp_u_last_pt; -static PyObject *__pyx_n_s_level; -static PyObject *__pyx_n_s_linewidth; -static PyObject *__pyx_n_s_logging; -static PyObject *__pyx_n_s_long; -static PyObject *__pyx_n_u_long; -static PyObject *__pyx_n_s_lower; -static PyObject *__pyx_n_s_m; -static PyObject *__pyx_n_u_magenta; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_make_divisible; -static PyObject *__pyx_n_s_manual_seed; -static PyObject *__pyx_n_s_matches; -static PyObject *__pyx_n_s_math; -static PyObject *__pyx_n_s_max; -static PyObject *__pyx_n_s_max_columns; -static PyObject *__pyx_n_s_max_det; -static PyObject *__pyx_n_s_max_nms; -static PyObject *__pyx_n_s_max_wh; -static PyObject *__pyx_n_s_merge; -static PyObject *__pyx_kp_u_message_s; -static PyObject *__pyx_n_s_methods; -static PyObject *__pyx_n_s_min_wh; -static PyObject *__pyx_n_s_minimum; -static PyObject *__pyx_n_s_minlength; -static PyObject *__pyx_n_s_mkdir; -static PyObject *__pyx_n_s_mm; -static PyObject *__pyx_n_s_multi_label; -static PyObject *__pyx_kp_u_must_be_multiple_of_max_stride; -static PyObject *__pyx_n_s_n; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_name_2; -static PyObject *__pyx_n_s_nc; -static PyObject *__pyx_n_s_new_size; -static PyObject *__pyx_n_s_nms; -static PyObject *__pyx_n_s_non_max_suppression; -static PyObject *__pyx_n_s_nonzero; -static PyObject *__pyx_n_s_np; -static PyObject *__pyx_n_s_numpy; -static PyObject *__pyx_n_s_one_cycle; -static PyObject *__pyx_n_s_one_cycle_locals_lambda; -static PyObject *__pyx_n_s_ones; -static PyObject *__pyx_n_s_open; -static PyObject *__pyx_n_s_ops; -static PyObject *__pyx_n_s_opt; -static PyObject *__pyx_n_s_options; -static PyObject *__pyx_n_s_os; -static PyObject *__pyx_n_s_output; -static PyObject *__pyx_n_s_pad; -static PyObject *__pyx_n_s_padh; -static PyObject *__pyx_n_s_padw; -static PyObject *__pyx_n_s_pandas; -static PyObject *__pyx_n_s_parents; -static PyObject *__pyx_n_s_parse; -static PyObject *__pyx_n_s_path; -static PyObject *__pyx_n_s_pathlib; -static PyObject *__pyx_n_s_pattern; -static PyObject 
*__pyx_n_s_pd; -static PyObject *__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils; -static PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2; -static PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3; -static PyObject *__pyx_n_s_pi; -static PyObject *__pyx_n_s_pinned; -static PyObject *__pyx_n_s_platform; -static PyObject *__pyx_n_s_precision; -static PyObject *__pyx_n_s_prediction; -static PyObject *__pyx_n_s_print; -static PyObject *__pyx_n_s_print_args; -static PyObject *__pyx_n_s_print_args_locals_genexpr; -static PyObject *__pyx_n_s_profile; -static PyObject *__pyx_n_s_python_version; -static PyObject *__pyx_n_s_random; -static PyObject *__pyx_n_s_range; -static PyObject *__pyx_n_s_rank; -static PyObject *__pyx_n_s_ratio_pad; -static PyObject *__pyx_n_s_re; -static PyObject *__pyx_n_s_recursive; -static PyObject *__pyx_n_u_red; -static PyObject *__pyx_n_s_redundant; -static PyObject *__pyx_n_s_repl; -static PyObject *__pyx_n_s_replace; -static PyObject *__pyx_n_s_reshape; -static PyObject *__pyx_n_s_resolve; -static PyObject *__pyx_n_s_s; -static PyObject *__pyx_kp_u_s_2; -static PyObject *__pyx_kp_u_s_exceeded; -static PyObject *__pyx_n_s_scale_coords; -static PyObject *__pyx_n_s_scores; -static PyObject *__pyx_n_s_search; -static PyObject *__pyx_n_s_search_dir; -static PyObject *__pyx_n_s_seed; -static PyObject *__pyx_n_s_send; -static PyObject *__pyx_n_s_sep; -static PyObject *__pyx_n_s_setNumThreads; -static PyObject *__pyx_n_s_set_logging; -static PyObject *__pyx_n_s_set_printoptions; -static PyObject *__pyx_n_s_shape; -static PyObject *__pyx_n_s_shutil; -static PyObject *__pyx_n_s_spec; -static PyObject *__pyx_n_s_split; -static PyObject *__pyx_n_s_st_size; -static PyObject *__pyx_n_s_startswith; -static PyObject *__pyx_n_s_stat; -static PyObject *__pyx_n_s_stem; -static PyObject *__pyx_n_s_steps; -static PyObject *__pyx_n_s_str; -static PyObject *__pyx_n_s_string; -static PyObject *__pyx_n_s_sub; -static PyObject *__pyx_n_s_suffix; -static PyObject *__pyx_n_s_sum; -static PyObject *__pyx_n_s_system; -static PyObject *__pyx_n_s_t; -static PyObject *__pyx_n_s_tensor; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_n_s_test_2; -static PyObject *__pyx_n_s_throw; -static PyObject *__pyx_n_s_time; -static PyObject *__pyx_n_s_time_limit; -static PyObject *__pyx_kp_u_tmp; -static PyObject *__pyx_kp_u_tmp_txt; -static PyObject *__pyx_n_s_torch; -static PyObject *__pyx_n_s_torch_backends_cudnn; -static PyObject *__pyx_n_s_torchvision; -static PyObject *__pyx_n_u_true; -static PyObject *__pyx_n_s_try_except; -static PyObject *__pyx_n_s_try_except_locals_handler; -static PyObject *__pyx_n_u_underline; -static PyObject *__pyx_n_s_unlink; -static PyObject *__pyx_n_s_unquote; -static PyObject *__pyx_kp_u_updating_to; -static PyObject *__pyx_n_s_url; -static PyObject *__pyx_n_s_url2file; -static PyObject *__pyx_n_s_urllib; -static PyObject *__pyx_n_s_user_config_dir; -static PyObject *__pyx_n_s_v; -static PyObject *__pyx_kp_u_valid_values_are_between_0_0_an; -static PyObject *__pyx_n_s_vars; -static PyObject *__pyx_n_s_verbose; -static PyObject *__pyx_kp_u_version; -static PyObject *__pyx_n_s_view; -static PyObject *__pyx_n_s_w; -static PyObject *__pyx_n_u_w; -static PyObject *__pyx_n_s_warning; -static PyObject *__pyx_n_s_weights; -static PyObject *__pyx_n_u_white; -static PyObject *__pyx_n_s_with_suffix; -static PyObject *__pyx_n_s_x; -static PyObject *__pyx_n_s_xc; -static PyObject *__pyx_n_s_xi; -static PyObject *__pyx_n_s_xyn2xy; -static PyObject *__pyx_n_s_xywh2xyxy; 
-static PyObject *__pyx_n_s_xywhn2xyxy; -static PyObject *__pyx_n_s_xyxy2xywh; -static PyObject *__pyx_n_s_xyxy2xywhn; -static PyObject *__pyx_n_s_y; -static PyObject *__pyx_n_s_y1; -static PyObject *__pyx_n_s_y2; -static PyObject *__pyx_n_s_yaml; -static PyObject *__pyx_n_u_yellow; -static PyObject *__pyx_n_u_yolov5; -static PyObject *__pyx_n_s_zeros; -#endif -/* #### Code section: decls ### */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_64__defaults__(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_set_logging(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_name, PyObject *__pyx_v_verbose); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10try_except_handler(PyObject *__pyx_self, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_2try_except(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_4methods(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_instance); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10print_args_genexpr(PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_6print_args(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_name, PyObject *__pyx_v_opt); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8init_seeds(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_seed); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15intersect_dicts_8genexpr2_genexpr(PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10intersect_dicts(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_da, PyObject *__pyx_v_db, PyObject *__pyx_v_exclude); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_12get_latest_run(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_search_dir); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_14user_config_dir(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dir, PyObject *__pyx_v_env_var); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_16is_writeable(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dir, PyObject *__pyx_v_test); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_18is_ascii(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_s); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_20is_chinese(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_s); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_22emojis(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_str); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9file_size_genexpr(PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_24file_size(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_path); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_26check_python(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_minimum); /* proto */ -static PyObject 
*__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_28check_version(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_current, CYTHON_UNUSED PyObject *__pyx_v_minimum, CYTHON_UNUSED PyObject *__pyx_v_name, CYTHON_UNUSED PyObject *__pyx_v_pinned, CYTHON_UNUSED PyObject *__pyx_v_hard, CYTHON_UNUSED PyObject *__pyx_v_verbose); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_30check_img_size(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_imgsz, PyObject *__pyx_v_s, PyObject *__pyx_v_floor); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_32url2file(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_url); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_34make_divisible(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_divisor); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_36clean_str(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_s); /* proto */ -static PyObject *__pyx_lambda_funcdef_lambda3(PyObject *__pyx_self, PyObject *__pyx_v_x); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_38one_cycle(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_y1, PyObject *__pyx_v_y2, PyObject *__pyx_v_steps); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8colorstr_genexpr(PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_40colorstr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_input); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_42labels_to_class_weights(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_labels, PyObject *__pyx_v_nc); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_66__defaults__(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_44labels_to_image_weights(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_labels, PyObject *__pyx_v_nc, PyObject *__pyx_v_class_weights); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_46xyxy2xywh(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_48xywh2xyxy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_50xywhn2xyxy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_w, PyObject *__pyx_v_h, PyObject *__pyx_v_padw, PyObject *__pyx_v_padh); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_52xyxy2xywhn(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_w, PyObject *__pyx_v_h, PyObject *__pyx_v_clip, PyObject *__pyx_v_eps); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_54xyn2xy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_w, PyObject *__pyx_v_h, PyObject *__pyx_v_padw, PyObject *__pyx_v_padh); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_56scale_coords(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_img1_shape, PyObject *__pyx_v_coords, PyObject *__pyx_v_img0_shape, PyObject *__pyx_v_ratio_pad); /* proto */ 
-static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_58clip_coords(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_boxes, PyObject *__pyx_v_shape); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_60non_max_suppression(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_prediction, PyObject *__pyx_v_conf_thres, PyObject *__pyx_v_iou_thres, PyObject *__pyx_v_classes, PyObject *__pyx_v_agnostic, PyObject *__pyx_v_multi_label, PyObject *__pyx_v_labels, PyObject *__pyx_v_max_det); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_62increment_path(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_path, PyObject *__pyx_v_exist_ok, PyObject *__pyx_v_sep, PyObject *__pyx_v_mkdir); /* proto */ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static __Pyx_CachedCFunction __pyx_umethod_PyDict_Type_get = {0, 0, 0, 0, 0}; -#if !CYTHON_USE_MODULE_STATE -static PyObject *__pyx_float_0_0; -static PyObject *__pyx_float_1_0; -static PyObject *__pyx_float_1E6; -static PyObject *__pyx_float_3E3; -static PyObject *__pyx_float_0_25; -static PyObject *__pyx_float_0_45; -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_2; -static PyObject *__pyx_int_3; -static PyObject *__pyx_int_4; -static PyObject *__pyx_int_5; -static PyObject *__pyx_int_6; -static PyObject *__pyx_int_10; -static PyObject *__pyx_int_32; -static PyObject *__pyx_int_80; -static PyObject *__pyx_int_100; -static PyObject *__pyx_int_300; -static PyObject *__pyx_int_320; -static PyObject *__pyx_int_640; -static PyObject *__pyx_int_neg_1; -#endif -#if !CYTHON_USE_MODULE_STATE -static PyObject *__pyx_tuple_; -static PyObject *__pyx_tuple__2; -static PyObject *__pyx_tuple__8; -static PyObject *__pyx_slice__23; -static PyObject *__pyx_slice__28; -static PyObject *__pyx_slice__33; -static PyObject *__pyx_slice__36; -static PyObject *__pyx_slice__38; 
-static PyObject *__pyx_slice__42; -static PyObject *__pyx_tuple__12; -static PyObject *__pyx_tuple__13; -static PyObject *__pyx_tuple__19; -static PyObject *__pyx_tuple__24; -static PyObject *__pyx_tuple__25; -static PyObject *__pyx_tuple__26; -static PyObject *__pyx_tuple__27; -static PyObject *__pyx_tuple__29; -static PyObject *__pyx_tuple__30; -static PyObject *__pyx_tuple__31; -static PyObject *__pyx_tuple__32; -static PyObject *__pyx_tuple__34; -static PyObject *__pyx_tuple__35; -static PyObject *__pyx_tuple__37; -static PyObject *__pyx_tuple__39; -static PyObject *__pyx_tuple__40; -static PyObject *__pyx_tuple__41; -static PyObject *__pyx_tuple__43; -static PyObject *__pyx_tuple__44; -static PyObject *__pyx_tuple__45; -static PyObject *__pyx_tuple__46; -static PyObject *__pyx_tuple__48; -static PyObject *__pyx_tuple__49; -static PyObject *__pyx_tuple__51; -static PyObject *__pyx_tuple__53; -static PyObject *__pyx_tuple__55; -static PyObject *__pyx_tuple__57; -static PyObject *__pyx_tuple__58; -static PyObject *__pyx_tuple__60; -static PyObject *__pyx_tuple__61; -static PyObject *__pyx_tuple__63; -static PyObject *__pyx_tuple__64; -static PyObject *__pyx_tuple__66; -static PyObject *__pyx_tuple__67; -static PyObject *__pyx_tuple__69; -static PyObject *__pyx_tuple__70; -static PyObject *__pyx_tuple__72; -static PyObject *__pyx_tuple__74; -static PyObject *__pyx_tuple__75; -static PyObject *__pyx_tuple__77; -static PyObject *__pyx_tuple__78; -static PyObject *__pyx_tuple__80; -static PyObject *__pyx_tuple__82; -static PyObject *__pyx_tuple__83; -static PyObject *__pyx_tuple__85; -static PyObject *__pyx_tuple__86; -static PyObject *__pyx_tuple__88; -static PyObject *__pyx_tuple__89; -static PyObject *__pyx_tuple__91; -static PyObject *__pyx_tuple__94; -static PyObject *__pyx_tuple__96; -static PyObject *__pyx_tuple__97; -static PyObject *__pyx_tuple__99; -static PyObject *__pyx_codeobj__3; -static PyObject *__pyx_tuple__101; -static PyObject *__pyx_tuple__102; -static PyObject *__pyx_tuple__104; -static PyObject *__pyx_tuple__105; -static PyObject *__pyx_tuple__108; -static PyObject *__pyx_tuple__110; -static PyObject *__pyx_tuple__111; -static PyObject *__pyx_tuple__113; -static PyObject *__pyx_tuple__115; -static PyObject *__pyx_tuple__116; -static PyObject *__pyx_tuple__118; -static PyObject *__pyx_tuple__119; -static PyObject *__pyx_tuple__121; -static PyObject *__pyx_tuple__123; -static PyObject *__pyx_tuple__124; -static PyObject *__pyx_tuple__126; -static PyObject *__pyx_codeobj__47; -static PyObject *__pyx_codeobj__50; -static PyObject *__pyx_codeobj__52; -static PyObject *__pyx_codeobj__54; -static PyObject *__pyx_codeobj__56; -static PyObject *__pyx_codeobj__59; -static PyObject *__pyx_codeobj__62; -static PyObject *__pyx_codeobj__65; -static PyObject *__pyx_codeobj__68; -static PyObject *__pyx_codeobj__71; -static PyObject *__pyx_codeobj__73; -static PyObject *__pyx_codeobj__76; -static PyObject *__pyx_codeobj__79; -static PyObject *__pyx_codeobj__81; -static PyObject *__pyx_codeobj__84; -static PyObject *__pyx_codeobj__87; -static PyObject *__pyx_codeobj__90; -static PyObject *__pyx_codeobj__92; -static PyObject *__pyx_codeobj__93; -static PyObject *__pyx_codeobj__95; -static PyObject *__pyx_codeobj__98; -static PyObject *__pyx_codeobj__100; -static PyObject *__pyx_codeobj__103; -static PyObject *__pyx_codeobj__106; -static PyObject *__pyx_codeobj__107; -static PyObject *__pyx_codeobj__109; -static PyObject *__pyx_codeobj__112; -static PyObject *__pyx_codeobj__114; -static 
PyObject *__pyx_codeobj__117; -static PyObject *__pyx_codeobj__120; -static PyObject *__pyx_codeobj__122; -static PyObject *__pyx_codeobj__125; -#endif -/* #### Code section: late_includes ### */ -/* #### Code section: module_state ### */ -#if CYTHON_USE_MODULE_STATE -typedef struct { - PyObject *__pyx_d; - PyObject *__pyx_b; - PyObject *__pyx_cython_runtime; - PyObject *__pyx_empty_tuple; - PyObject *__pyx_empty_bytes; - PyObject *__pyx_empty_unicode; - #ifdef __Pyx_CyFunction_USED - PyTypeObject *__pyx_CyFunctionType; - #endif - #ifdef __Pyx_FusedFunction_USED - PyTypeObject *__pyx_FusedFunctionType; - #endif - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr; - PyTypeObject *__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr; - PyObject *__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr; - PyObject *__pyx_kp_u_0_0_0; - PyObject *__pyx_kp_u_0m; - PyObject *__pyx_kp_u_11_5g; - PyObject *__pyx_kp_u_1m; - PyObject *__pyx_kp_u_30m; - PyObject *__pyx_kp_u_31m; - PyObject *__pyx_kp_u_32m; - PyObject *__pyx_kp_u_33m; - PyObject *__pyx_kp_u_34m; - PyObject *__pyx_kp_u_35m; - PyObject *__pyx_kp_u_36m; - PyObject *__pyx_kp_u_37m; - PyObject *__pyx_kp_u_3_6_2; - PyObject *__pyx_kp_u_4m; - PyObject *__pyx_kp_u_90m; - PyObject *__pyx_kp_u_91m; - PyObject *__pyx_kp_u_92m; - PyObject *__pyx_kp_u_93m; - PyObject *__pyx_kp_u_94m; - PyObject *__pyx_kp_u_95m; - PyObject *__pyx_kp_u_96m; - PyObject *__pyx_kp_u_97m; - PyObject *__pyx_kp_u_AppData_Roaming; - PyObject *__pyx_n_s_AssertionError; - PyObject *__pyx_n_u_Darwin; - PyObject *__pyx_n_s_FILE; - PyObject *__pyx_n_s_INFO; - PyObject 
*__pyx_kp_u_Invalid_Confidence_threshold; - PyObject *__pyx_kp_u_Invalid_IoU; - PyObject *__pyx_n_s_LOGGER; - PyObject *__pyx_kp_u_Library_Application_Support; - PyObject *__pyx_n_u_Linux; - PyObject *__pyx_n_s_NCOLS; - PyObject *__pyx_n_u_NUMEXPR_MAX_THREADS; - PyObject *__pyx_n_s_NUM_THREADS; - PyObject *__pyx_n_s_OSError; - PyObject *__pyx_n_s_Path; - PyObject *__pyx_kp_u_Python; - PyObject *__pyx_n_u_RANK; - PyObject *__pyx_n_s_ROOT; - PyObject *__pyx_n_s_R_OK; - PyObject *__pyx_n_s_T; - PyObject *__pyx_n_s_Tensor; - PyObject *__pyx_n_u_Ultralytics; - PyObject *__pyx_n_s_VERBOSE; - PyObject *__pyx_n_s_WARNING; - PyObject *__pyx_kp_u_WARNING_NMS_time_limit; - PyObject *__pyx_kp_u_WARNING_img_size; - PyObject *__pyx_n_u_Windows; - PyObject *__pyx_n_u_YOLOV5_CONFIG_DIR; - PyObject *__pyx_n_u_YOLOv5_VERBOSE; - PyObject *__pyx_kp_u__10; - PyObject *__pyx_kp_u__11; - PyObject *__pyx_n_u__14; - PyObject *__pyx_kp_u__15; - PyObject *__pyx_kp_u__16; - PyObject *__pyx_kp_u__17; - PyObject *__pyx_kp_u__18; - PyObject *__pyx_n_s__20; - PyObject *__pyx_kp_u__20; - PyObject *__pyx_kp_u__21; - PyObject *__pyx_n_u__22; - PyObject *__pyx_n_u__4; - PyObject *__pyx_kp_u__5; - PyObject *__pyx_kp_u__6; - PyObject *__pyx_kp_u__7; - PyObject *__pyx_n_s__9; - PyObject *__pyx_kp_u__9; - PyObject *__pyx_n_s_access; - PyObject *__pyx_n_s_agnostic; - PyObject *__pyx_n_s_any; - PyObject *__pyx_n_s_args; - PyObject *__pyx_n_s_argsort; - PyObject *__pyx_n_s_array; - PyObject *__pyx_n_s_as_tuple; - PyObject *__pyx_n_u_ascii; - PyObject *__pyx_n_s_astype; - PyObject *__pyx_n_s_asyncio_coroutines; - PyObject *__pyx_n_s_backends; - PyObject *__pyx_n_s_basicConfig; - PyObject *__pyx_n_s_benchmark; - PyObject *__pyx_n_s_bincount; - PyObject *__pyx_n_u_black; - PyObject *__pyx_n_u_blue; - PyObject *__pyx_n_u_bold; - PyObject *__pyx_n_s_box; - PyObject *__pyx_n_s_box_iou; - PyObject *__pyx_n_s_boxes; - PyObject *__pyx_n_u_bright_black; - PyObject *__pyx_n_u_bright_blue; - PyObject *__pyx_n_u_bright_cyan; - PyObject *__pyx_n_u_bright_green; - PyObject *__pyx_n_u_bright_magenta; - PyObject *__pyx_n_u_bright_red; - PyObject *__pyx_n_u_bright_white; - PyObject *__pyx_n_u_bright_yellow; - PyObject *__pyx_n_s_c; - PyObject *__pyx_n_s_cat; - PyObject *__pyx_n_s_ceil; - PyObject *__pyx_n_s_cfg; - PyObject *__pyx_n_s_check_img_size; - PyObject *__pyx_n_s_check_python; - PyObject *__pyx_n_s_check_version; - PyObject *__pyx_n_s_clamp; - PyObject *__pyx_n_s_class_counts; - PyObject *__pyx_n_s_class_getitem; - PyObject *__pyx_n_s_class_weights; - PyObject *__pyx_n_s_classes; - PyObject *__pyx_n_s_clean_str; - PyObject *__pyx_n_s_cline_in_traceback; - PyObject *__pyx_n_s_clip; - PyObject *__pyx_n_s_clip_coords; - PyObject *__pyx_n_s_clone; - PyObject *__pyx_n_s_close; - PyObject *__pyx_n_s_colors; - PyObject *__pyx_n_s_colorstr; - PyObject *__pyx_n_s_colorstr_locals_genexpr; - PyObject *__pyx_n_s_columns; - PyObject *__pyx_n_s_concatenate; - PyObject *__pyx_n_s_conf; - PyObject *__pyx_n_s_conf_thres; - PyObject *__pyx_kp_u_config; - PyObject *__pyx_n_s_coords; - PyObject *__pyx_n_s_copy; - PyObject *__pyx_n_s_cos; - PyObject *__pyx_n_s_cpu_count; - PyObject *__pyx_n_s_cudnn; - PyObject *__pyx_n_s_current; - PyObject *__pyx_n_s_cv2; - PyObject *__pyx_n_u_cyan; - PyObject *__pyx_kp_u_d; - PyObject *__pyx_n_s_d_2; - PyObject *__pyx_n_s_da; - PyObject *__pyx_n_s_db; - PyObject *__pyx_n_s_decode; - PyObject *__pyx_n_s_descending; - PyObject *__pyx_n_s_deterministic; - PyObject *__pyx_n_s_device; - PyObject *__pyx_n_s_dir; - PyObject 
*__pyx_n_s_dirs; - PyObject *__pyx_kp_u_disable; - PyObject *__pyx_n_s_display; - PyObject *__pyx_n_s_divisor; - PyObject *__pyx_n_s_e; - PyObject *__pyx_n_s_emojis; - PyObject *__pyx_kp_u_enable; - PyObject *__pyx_n_s_encode; - PyObject *__pyx_n_u_end; - PyObject *__pyx_n_s_enter; - PyObject *__pyx_n_s_enumerate; - PyObject *__pyx_n_s_env; - PyObject *__pyx_n_s_env_var; - PyObject *__pyx_n_s_environ; - PyObject *__pyx_n_s_eps; - PyObject *__pyx_n_s_exclude; - PyObject *__pyx_n_s_exist_ok; - PyObject *__pyx_n_s_exists; - PyObject *__pyx_n_s_exit; - PyObject *__pyx_n_s_f; - PyObject *__pyx_n_s_file; - PyObject *__pyx_n_s_file_2; - PyObject *__pyx_n_s_file_size; - PyObject *__pyx_n_s_file_size_locals_genexpr; - PyObject *__pyx_n_s_fitness; - PyObject *__pyx_n_s_float; - PyObject *__pyx_n_u_float_kind; - PyObject *__pyx_n_s_floor; - PyObject *__pyx_n_s_format; - PyObject *__pyx_n_s_formatter; - PyObject *__pyx_n_s_from_numpy; - PyObject *__pyx_n_s_func; - PyObject *__pyx_n_s_gain; - PyObject *__pyx_kp_u_gc; - PyObject *__pyx_n_s_genexpr; - PyObject *__pyx_n_s_get; - PyObject *__pyx_n_s_getLogger; - PyObject *__pyx_n_s_get_latest_run; - PyObject *__pyx_n_s_get_terminal_size; - PyObject *__pyx_n_s_getctime; - PyObject *__pyx_n_s_getenv; - PyObject *__pyx_n_s_glob; - PyObject *__pyx_n_u_green; - PyObject *__pyx_n_s_groups; - PyObject *__pyx_n_s_h; - PyObject *__pyx_n_s_handler; - PyObject *__pyx_n_s_hard; - PyObject *__pyx_n_s_home; - PyObject *__pyx_n_s_i; - PyObject *__pyx_n_u_ignore; - PyObject *__pyx_n_s_image_weights; - PyObject *__pyx_n_s_img0_shape; - PyObject *__pyx_n_s_img1_shape; - PyObject *__pyx_n_s_imgsz; - PyObject *__pyx_n_s_import; - PyObject *__pyx_n_s_increment_path; - PyObject *__pyx_n_s_info; - PyObject *__pyx_n_s_init_seeds; - PyObject *__pyx_n_s_initializing; - PyObject *__pyx_n_s_input; - PyObject *__pyx_n_s_instance; - PyObject *__pyx_n_s_int; - PyObject *__pyx_n_s_intersect_dicts; - PyObject *__pyx_n_s_intersect_dicts_locals_genexpr; - PyObject *__pyx_n_s_iou; - PyObject *__pyx_n_s_iou_thres; - PyObject *__pyx_n_s_is_ascii; - PyObject *__pyx_n_s_is_chinese; - PyObject *__pyx_n_s_is_coroutine; - PyObject *__pyx_n_s_is_dir; - PyObject *__pyx_n_s_is_file; - PyObject *__pyx_n_s_is_writeable; - PyObject *__pyx_kp_u_isenabled; - PyObject *__pyx_n_s_items; - PyObject *__pyx_n_s_j; - PyObject *__pyx_n_s_k; - PyObject *__pyx_n_s_keepdim; - PyObject *__pyx_n_s_key; - PyObject *__pyx_n_s_kwargs; - PyObject *__pyx_n_s_l; - PyObject *__pyx_n_s_labels; - PyObject *__pyx_n_s_labels_to_class_weights; - PyObject *__pyx_n_s_labels_to_image_weights; - PyObject *__pyx_n_s_last_list; - PyObject *__pyx_kp_u_last_pt; - PyObject *__pyx_n_s_level; - PyObject *__pyx_n_s_linewidth; - PyObject *__pyx_n_s_logging; - PyObject *__pyx_n_s_long; - PyObject *__pyx_n_u_long; - PyObject *__pyx_n_s_lower; - PyObject *__pyx_n_s_m; - PyObject *__pyx_n_u_magenta; - PyObject *__pyx_n_s_main; - PyObject *__pyx_n_s_make_divisible; - PyObject *__pyx_n_s_manual_seed; - PyObject *__pyx_n_s_matches; - PyObject *__pyx_n_s_math; - PyObject *__pyx_n_s_max; - PyObject *__pyx_n_s_max_columns; - PyObject *__pyx_n_s_max_det; - PyObject *__pyx_n_s_max_nms; - PyObject *__pyx_n_s_max_wh; - PyObject *__pyx_n_s_merge; - PyObject *__pyx_kp_u_message_s; - PyObject *__pyx_n_s_methods; - PyObject *__pyx_n_s_min_wh; - PyObject *__pyx_n_s_minimum; - PyObject *__pyx_n_s_minlength; - PyObject *__pyx_n_s_mkdir; - PyObject *__pyx_n_s_mm; - PyObject *__pyx_n_s_multi_label; - PyObject *__pyx_kp_u_must_be_multiple_of_max_stride; - PyObject 
*__pyx_n_s_n; - PyObject *__pyx_n_s_name; - PyObject *__pyx_n_s_name_2; - PyObject *__pyx_n_s_nc; - PyObject *__pyx_n_s_new_size; - PyObject *__pyx_n_s_nms; - PyObject *__pyx_n_s_non_max_suppression; - PyObject *__pyx_n_s_nonzero; - PyObject *__pyx_n_s_np; - PyObject *__pyx_n_s_numpy; - PyObject *__pyx_n_s_one_cycle; - PyObject *__pyx_n_s_one_cycle_locals_lambda; - PyObject *__pyx_n_s_ones; - PyObject *__pyx_n_s_open; - PyObject *__pyx_n_s_ops; - PyObject *__pyx_n_s_opt; - PyObject *__pyx_n_s_options; - PyObject *__pyx_n_s_os; - PyObject *__pyx_n_s_output; - PyObject *__pyx_n_s_pad; - PyObject *__pyx_n_s_padh; - PyObject *__pyx_n_s_padw; - PyObject *__pyx_n_s_pandas; - PyObject *__pyx_n_s_parents; - PyObject *__pyx_n_s_parse; - PyObject *__pyx_n_s_path; - PyObject *__pyx_n_s_pathlib; - PyObject *__pyx_n_s_pattern; - PyObject *__pyx_n_s_pd; - PyObject *__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils; - PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2; - PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3; - PyObject *__pyx_n_s_pi; - PyObject *__pyx_n_s_pinned; - PyObject *__pyx_n_s_platform; - PyObject *__pyx_n_s_precision; - PyObject *__pyx_n_s_prediction; - PyObject *__pyx_n_s_print; - PyObject *__pyx_n_s_print_args; - PyObject *__pyx_n_s_print_args_locals_genexpr; - PyObject *__pyx_n_s_profile; - PyObject *__pyx_n_s_python_version; - PyObject *__pyx_n_s_random; - PyObject *__pyx_n_s_range; - PyObject *__pyx_n_s_rank; - PyObject *__pyx_n_s_ratio_pad; - PyObject *__pyx_n_s_re; - PyObject *__pyx_n_s_recursive; - PyObject *__pyx_n_u_red; - PyObject *__pyx_n_s_redundant; - PyObject *__pyx_n_s_repl; - PyObject *__pyx_n_s_replace; - PyObject *__pyx_n_s_reshape; - PyObject *__pyx_n_s_resolve; - PyObject *__pyx_n_s_s; - PyObject *__pyx_kp_u_s_2; - PyObject *__pyx_kp_u_s_exceeded; - PyObject *__pyx_n_s_scale_coords; - PyObject *__pyx_n_s_scores; - PyObject *__pyx_n_s_search; - PyObject *__pyx_n_s_search_dir; - PyObject *__pyx_n_s_seed; - PyObject *__pyx_n_s_send; - PyObject *__pyx_n_s_sep; - PyObject *__pyx_n_s_setNumThreads; - PyObject *__pyx_n_s_set_logging; - PyObject *__pyx_n_s_set_printoptions; - PyObject *__pyx_n_s_shape; - PyObject *__pyx_n_s_shutil; - PyObject *__pyx_n_s_spec; - PyObject *__pyx_n_s_split; - PyObject *__pyx_n_s_st_size; - PyObject *__pyx_n_s_startswith; - PyObject *__pyx_n_s_stat; - PyObject *__pyx_n_s_stem; - PyObject *__pyx_n_s_steps; - PyObject *__pyx_n_s_str; - PyObject *__pyx_n_s_string; - PyObject *__pyx_n_s_sub; - PyObject *__pyx_n_s_suffix; - PyObject *__pyx_n_s_sum; - PyObject *__pyx_n_s_system; - PyObject *__pyx_n_s_t; - PyObject *__pyx_n_s_tensor; - PyObject *__pyx_n_s_test; - PyObject *__pyx_n_s_test_2; - PyObject *__pyx_n_s_throw; - PyObject *__pyx_n_s_time; - PyObject *__pyx_n_s_time_limit; - PyObject *__pyx_kp_u_tmp; - PyObject *__pyx_kp_u_tmp_txt; - PyObject *__pyx_n_s_torch; - PyObject *__pyx_n_s_torch_backends_cudnn; - PyObject *__pyx_n_s_torchvision; - PyObject *__pyx_n_u_true; - PyObject *__pyx_n_s_try_except; - PyObject *__pyx_n_s_try_except_locals_handler; - PyObject *__pyx_n_u_underline; - PyObject *__pyx_n_s_unlink; - PyObject *__pyx_n_s_unquote; - PyObject *__pyx_kp_u_updating_to; - PyObject *__pyx_n_s_url; - PyObject *__pyx_n_s_url2file; - PyObject *__pyx_n_s_urllib; - PyObject *__pyx_n_s_user_config_dir; - PyObject *__pyx_n_s_v; - PyObject *__pyx_kp_u_valid_values_are_between_0_0_an; - PyObject *__pyx_n_s_vars; - PyObject *__pyx_n_s_verbose; - PyObject *__pyx_kp_u_version; - PyObject *__pyx_n_s_view; - PyObject *__pyx_n_s_w; - PyObject 
*__pyx_n_u_w; - PyObject *__pyx_n_s_warning; - PyObject *__pyx_n_s_weights; - PyObject *__pyx_n_u_white; - PyObject *__pyx_n_s_with_suffix; - PyObject *__pyx_n_s_x; - PyObject *__pyx_n_s_xc; - PyObject *__pyx_n_s_xi; - PyObject *__pyx_n_s_xyn2xy; - PyObject *__pyx_n_s_xywh2xyxy; - PyObject *__pyx_n_s_xywhn2xyxy; - PyObject *__pyx_n_s_xyxy2xywh; - PyObject *__pyx_n_s_xyxy2xywhn; - PyObject *__pyx_n_s_y; - PyObject *__pyx_n_s_y1; - PyObject *__pyx_n_s_y2; - PyObject *__pyx_n_s_yaml; - PyObject *__pyx_n_u_yellow; - PyObject *__pyx_n_u_yolov5; - PyObject *__pyx_n_s_zeros; - PyObject *__pyx_float_0_0; - PyObject *__pyx_float_1_0; - PyObject *__pyx_float_1E6; - PyObject *__pyx_float_3E3; - PyObject *__pyx_float_0_25; - PyObject *__pyx_float_0_45; - PyObject *__pyx_int_0; - PyObject *__pyx_int_1; - PyObject *__pyx_int_2; - PyObject *__pyx_int_3; - PyObject *__pyx_int_4; - PyObject *__pyx_int_5; - PyObject *__pyx_int_6; - PyObject *__pyx_int_10; - PyObject *__pyx_int_32; - PyObject *__pyx_int_80; - PyObject *__pyx_int_100; - PyObject *__pyx_int_300; - PyObject *__pyx_int_320; - PyObject *__pyx_int_640; - PyObject *__pyx_int_neg_1; - PyObject *__pyx_tuple_; - PyObject *__pyx_tuple__2; - PyObject *__pyx_tuple__8; - PyObject *__pyx_slice__23; - PyObject *__pyx_slice__28; - PyObject *__pyx_slice__33; - PyObject *__pyx_slice__36; - PyObject *__pyx_slice__38; - PyObject *__pyx_slice__42; - PyObject *__pyx_tuple__12; - PyObject *__pyx_tuple__13; - PyObject *__pyx_tuple__19; - PyObject *__pyx_tuple__24; - PyObject *__pyx_tuple__25; - PyObject *__pyx_tuple__26; - PyObject *__pyx_tuple__27; - PyObject *__pyx_tuple__29; - PyObject *__pyx_tuple__30; - PyObject *__pyx_tuple__31; - PyObject *__pyx_tuple__32; - PyObject *__pyx_tuple__34; - PyObject *__pyx_tuple__35; - PyObject *__pyx_tuple__37; - PyObject *__pyx_tuple__39; - PyObject *__pyx_tuple__40; - PyObject *__pyx_tuple__41; - PyObject *__pyx_tuple__43; - PyObject *__pyx_tuple__44; - PyObject *__pyx_tuple__45; - PyObject *__pyx_tuple__46; - PyObject *__pyx_tuple__48; - PyObject *__pyx_tuple__49; - PyObject *__pyx_tuple__51; - PyObject *__pyx_tuple__53; - PyObject *__pyx_tuple__55; - PyObject *__pyx_tuple__57; - PyObject *__pyx_tuple__58; - PyObject *__pyx_tuple__60; - PyObject *__pyx_tuple__61; - PyObject *__pyx_tuple__63; - PyObject *__pyx_tuple__64; - PyObject *__pyx_tuple__66; - PyObject *__pyx_tuple__67; - PyObject *__pyx_tuple__69; - PyObject *__pyx_tuple__70; - PyObject *__pyx_tuple__72; - PyObject *__pyx_tuple__74; - PyObject *__pyx_tuple__75; - PyObject *__pyx_tuple__77; - PyObject *__pyx_tuple__78; - PyObject *__pyx_tuple__80; - PyObject *__pyx_tuple__82; - PyObject *__pyx_tuple__83; - PyObject *__pyx_tuple__85; - PyObject *__pyx_tuple__86; - PyObject *__pyx_tuple__88; - PyObject *__pyx_tuple__89; - PyObject *__pyx_tuple__91; - PyObject *__pyx_tuple__94; - PyObject *__pyx_tuple__96; - PyObject *__pyx_tuple__97; - PyObject *__pyx_tuple__99; - PyObject *__pyx_codeobj__3; - PyObject *__pyx_tuple__101; - PyObject *__pyx_tuple__102; - PyObject *__pyx_tuple__104; - PyObject *__pyx_tuple__105; - PyObject *__pyx_tuple__108; - PyObject *__pyx_tuple__110; - PyObject *__pyx_tuple__111; - PyObject *__pyx_tuple__113; - PyObject *__pyx_tuple__115; - PyObject *__pyx_tuple__116; - PyObject *__pyx_tuple__118; - PyObject *__pyx_tuple__119; - PyObject *__pyx_tuple__121; - PyObject *__pyx_tuple__123; - PyObject *__pyx_tuple__124; - PyObject *__pyx_tuple__126; - PyObject *__pyx_codeobj__47; - PyObject *__pyx_codeobj__50; - PyObject *__pyx_codeobj__52; - PyObject 
*__pyx_codeobj__54; - PyObject *__pyx_codeobj__56; - PyObject *__pyx_codeobj__59; - PyObject *__pyx_codeobj__62; - PyObject *__pyx_codeobj__65; - PyObject *__pyx_codeobj__68; - PyObject *__pyx_codeobj__71; - PyObject *__pyx_codeobj__73; - PyObject *__pyx_codeobj__76; - PyObject *__pyx_codeobj__79; - PyObject *__pyx_codeobj__81; - PyObject *__pyx_codeobj__84; - PyObject *__pyx_codeobj__87; - PyObject *__pyx_codeobj__90; - PyObject *__pyx_codeobj__92; - PyObject *__pyx_codeobj__93; - PyObject *__pyx_codeobj__95; - PyObject *__pyx_codeobj__98; - PyObject *__pyx_codeobj__100; - PyObject *__pyx_codeobj__103; - PyObject *__pyx_codeobj__106; - PyObject *__pyx_codeobj__107; - PyObject *__pyx_codeobj__109; - PyObject *__pyx_codeobj__112; - PyObject *__pyx_codeobj__114; - PyObject *__pyx_codeobj__117; - PyObject *__pyx_codeobj__120; - PyObject *__pyx_codeobj__122; - PyObject *__pyx_codeobj__125; -} __pyx_mstate; - -#ifdef __cplusplus -namespace { - extern struct PyModuleDef __pyx_moduledef; -} /* anonymous namespace */ -#else -static struct PyModuleDef __pyx_moduledef; -#endif - -#define __pyx_mstate(o) ((__pyx_mstate *)__Pyx_PyModule_GetState(o)) - -#define __pyx_mstate_global (__pyx_mstate(PyState_FindModule(&__pyx_moduledef))) - -#define __pyx_m (PyState_FindModule(&__pyx_moduledef)) -#endif -/* #### Code section: module_state_clear ### */ -#if CYTHON_USE_MODULE_STATE -static int __pyx_m_clear(PyObject *m) { - __pyx_mstate *clear_module_state = __pyx_mstate(m); - if (!clear_module_state) return 0; - Py_CLEAR(clear_module_state->__pyx_d); - Py_CLEAR(clear_module_state->__pyx_b); - Py_CLEAR(clear_module_state->__pyx_cython_runtime); - Py_CLEAR(clear_module_state->__pyx_empty_tuple); - Py_CLEAR(clear_module_state->__pyx_empty_bytes); - Py_CLEAR(clear_module_state->__pyx_empty_unicode); - #ifdef __Pyx_CyFunction_USED - Py_CLEAR(clear_module_state->__pyx_CyFunctionType); - #endif - #ifdef __Pyx_FusedFunction_USED - Py_CLEAR(clear_module_state->__pyx_FusedFunctionType); - #endif - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except); - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args); - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr); - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts); - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr); - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size); - 
Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr); - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle); - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr); - Py_CLEAR(clear_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr); - Py_CLEAR(clear_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr); - Py_CLEAR(clear_module_state->__pyx_kp_u_0_0_0); - Py_CLEAR(clear_module_state->__pyx_kp_u_0m); - Py_CLEAR(clear_module_state->__pyx_kp_u_11_5g); - Py_CLEAR(clear_module_state->__pyx_kp_u_1m); - Py_CLEAR(clear_module_state->__pyx_kp_u_30m); - Py_CLEAR(clear_module_state->__pyx_kp_u_31m); - Py_CLEAR(clear_module_state->__pyx_kp_u_32m); - Py_CLEAR(clear_module_state->__pyx_kp_u_33m); - Py_CLEAR(clear_module_state->__pyx_kp_u_34m); - Py_CLEAR(clear_module_state->__pyx_kp_u_35m); - Py_CLEAR(clear_module_state->__pyx_kp_u_36m); - Py_CLEAR(clear_module_state->__pyx_kp_u_37m); - Py_CLEAR(clear_module_state->__pyx_kp_u_3_6_2); - Py_CLEAR(clear_module_state->__pyx_kp_u_4m); - Py_CLEAR(clear_module_state->__pyx_kp_u_90m); - Py_CLEAR(clear_module_state->__pyx_kp_u_91m); - Py_CLEAR(clear_module_state->__pyx_kp_u_92m); - Py_CLEAR(clear_module_state->__pyx_kp_u_93m); - Py_CLEAR(clear_module_state->__pyx_kp_u_94m); - Py_CLEAR(clear_module_state->__pyx_kp_u_95m); - Py_CLEAR(clear_module_state->__pyx_kp_u_96m); - Py_CLEAR(clear_module_state->__pyx_kp_u_97m); - Py_CLEAR(clear_module_state->__pyx_kp_u_AppData_Roaming); - Py_CLEAR(clear_module_state->__pyx_n_s_AssertionError); - Py_CLEAR(clear_module_state->__pyx_n_u_Darwin); - Py_CLEAR(clear_module_state->__pyx_n_s_FILE); - Py_CLEAR(clear_module_state->__pyx_n_s_INFO); - Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_Confidence_threshold); - Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_IoU); - Py_CLEAR(clear_module_state->__pyx_n_s_LOGGER); - Py_CLEAR(clear_module_state->__pyx_kp_u_Library_Application_Support); - Py_CLEAR(clear_module_state->__pyx_n_u_Linux); - Py_CLEAR(clear_module_state->__pyx_n_s_NCOLS); - Py_CLEAR(clear_module_state->__pyx_n_u_NUMEXPR_MAX_THREADS); - Py_CLEAR(clear_module_state->__pyx_n_s_NUM_THREADS); - Py_CLEAR(clear_module_state->__pyx_n_s_OSError); - Py_CLEAR(clear_module_state->__pyx_n_s_Path); - Py_CLEAR(clear_module_state->__pyx_kp_u_Python); - Py_CLEAR(clear_module_state->__pyx_n_u_RANK); - Py_CLEAR(clear_module_state->__pyx_n_s_ROOT); - Py_CLEAR(clear_module_state->__pyx_n_s_R_OK); - Py_CLEAR(clear_module_state->__pyx_n_s_T); - Py_CLEAR(clear_module_state->__pyx_n_s_Tensor); - Py_CLEAR(clear_module_state->__pyx_n_u_Ultralytics); - Py_CLEAR(clear_module_state->__pyx_n_s_VERBOSE); - Py_CLEAR(clear_module_state->__pyx_n_s_WARNING); - Py_CLEAR(clear_module_state->__pyx_kp_u_WARNING_NMS_time_limit); - Py_CLEAR(clear_module_state->__pyx_kp_u_WARNING_img_size); - Py_CLEAR(clear_module_state->__pyx_n_u_Windows); - Py_CLEAR(clear_module_state->__pyx_n_u_YOLOV5_CONFIG_DIR); - 
Py_CLEAR(clear_module_state->__pyx_n_u_YOLOv5_VERBOSE); - Py_CLEAR(clear_module_state->__pyx_kp_u__10); - Py_CLEAR(clear_module_state->__pyx_kp_u__11); - Py_CLEAR(clear_module_state->__pyx_n_u__14); - Py_CLEAR(clear_module_state->__pyx_kp_u__15); - Py_CLEAR(clear_module_state->__pyx_kp_u__16); - Py_CLEAR(clear_module_state->__pyx_kp_u__17); - Py_CLEAR(clear_module_state->__pyx_kp_u__18); - Py_CLEAR(clear_module_state->__pyx_n_s__20); - Py_CLEAR(clear_module_state->__pyx_kp_u__20); - Py_CLEAR(clear_module_state->__pyx_kp_u__21); - Py_CLEAR(clear_module_state->__pyx_n_u__22); - Py_CLEAR(clear_module_state->__pyx_n_u__4); - Py_CLEAR(clear_module_state->__pyx_kp_u__5); - Py_CLEAR(clear_module_state->__pyx_kp_u__6); - Py_CLEAR(clear_module_state->__pyx_kp_u__7); - Py_CLEAR(clear_module_state->__pyx_n_s__9); - Py_CLEAR(clear_module_state->__pyx_kp_u__9); - Py_CLEAR(clear_module_state->__pyx_n_s_access); - Py_CLEAR(clear_module_state->__pyx_n_s_agnostic); - Py_CLEAR(clear_module_state->__pyx_n_s_any); - Py_CLEAR(clear_module_state->__pyx_n_s_args); - Py_CLEAR(clear_module_state->__pyx_n_s_argsort); - Py_CLEAR(clear_module_state->__pyx_n_s_array); - Py_CLEAR(clear_module_state->__pyx_n_s_as_tuple); - Py_CLEAR(clear_module_state->__pyx_n_u_ascii); - Py_CLEAR(clear_module_state->__pyx_n_s_astype); - Py_CLEAR(clear_module_state->__pyx_n_s_asyncio_coroutines); - Py_CLEAR(clear_module_state->__pyx_n_s_backends); - Py_CLEAR(clear_module_state->__pyx_n_s_basicConfig); - Py_CLEAR(clear_module_state->__pyx_n_s_benchmark); - Py_CLEAR(clear_module_state->__pyx_n_s_bincount); - Py_CLEAR(clear_module_state->__pyx_n_u_black); - Py_CLEAR(clear_module_state->__pyx_n_u_blue); - Py_CLEAR(clear_module_state->__pyx_n_u_bold); - Py_CLEAR(clear_module_state->__pyx_n_s_box); - Py_CLEAR(clear_module_state->__pyx_n_s_box_iou); - Py_CLEAR(clear_module_state->__pyx_n_s_boxes); - Py_CLEAR(clear_module_state->__pyx_n_u_bright_black); - Py_CLEAR(clear_module_state->__pyx_n_u_bright_blue); - Py_CLEAR(clear_module_state->__pyx_n_u_bright_cyan); - Py_CLEAR(clear_module_state->__pyx_n_u_bright_green); - Py_CLEAR(clear_module_state->__pyx_n_u_bright_magenta); - Py_CLEAR(clear_module_state->__pyx_n_u_bright_red); - Py_CLEAR(clear_module_state->__pyx_n_u_bright_white); - Py_CLEAR(clear_module_state->__pyx_n_u_bright_yellow); - Py_CLEAR(clear_module_state->__pyx_n_s_c); - Py_CLEAR(clear_module_state->__pyx_n_s_cat); - Py_CLEAR(clear_module_state->__pyx_n_s_ceil); - Py_CLEAR(clear_module_state->__pyx_n_s_cfg); - Py_CLEAR(clear_module_state->__pyx_n_s_check_img_size); - Py_CLEAR(clear_module_state->__pyx_n_s_check_python); - Py_CLEAR(clear_module_state->__pyx_n_s_check_version); - Py_CLEAR(clear_module_state->__pyx_n_s_clamp); - Py_CLEAR(clear_module_state->__pyx_n_s_class_counts); - Py_CLEAR(clear_module_state->__pyx_n_s_class_getitem); - Py_CLEAR(clear_module_state->__pyx_n_s_class_weights); - Py_CLEAR(clear_module_state->__pyx_n_s_classes); - Py_CLEAR(clear_module_state->__pyx_n_s_clean_str); - Py_CLEAR(clear_module_state->__pyx_n_s_cline_in_traceback); - Py_CLEAR(clear_module_state->__pyx_n_s_clip); - Py_CLEAR(clear_module_state->__pyx_n_s_clip_coords); - Py_CLEAR(clear_module_state->__pyx_n_s_clone); - Py_CLEAR(clear_module_state->__pyx_n_s_close); - Py_CLEAR(clear_module_state->__pyx_n_s_colors); - Py_CLEAR(clear_module_state->__pyx_n_s_colorstr); - Py_CLEAR(clear_module_state->__pyx_n_s_colorstr_locals_genexpr); - Py_CLEAR(clear_module_state->__pyx_n_s_columns); - Py_CLEAR(clear_module_state->__pyx_n_s_concatenate); - 
Py_CLEAR(clear_module_state->__pyx_n_s_conf); - Py_CLEAR(clear_module_state->__pyx_n_s_conf_thres); - Py_CLEAR(clear_module_state->__pyx_kp_u_config); - Py_CLEAR(clear_module_state->__pyx_n_s_coords); - Py_CLEAR(clear_module_state->__pyx_n_s_copy); - Py_CLEAR(clear_module_state->__pyx_n_s_cos); - Py_CLEAR(clear_module_state->__pyx_n_s_cpu_count); - Py_CLEAR(clear_module_state->__pyx_n_s_cudnn); - Py_CLEAR(clear_module_state->__pyx_n_s_current); - Py_CLEAR(clear_module_state->__pyx_n_s_cv2); - Py_CLEAR(clear_module_state->__pyx_n_u_cyan); - Py_CLEAR(clear_module_state->__pyx_kp_u_d); - Py_CLEAR(clear_module_state->__pyx_n_s_d_2); - Py_CLEAR(clear_module_state->__pyx_n_s_da); - Py_CLEAR(clear_module_state->__pyx_n_s_db); - Py_CLEAR(clear_module_state->__pyx_n_s_decode); - Py_CLEAR(clear_module_state->__pyx_n_s_descending); - Py_CLEAR(clear_module_state->__pyx_n_s_deterministic); - Py_CLEAR(clear_module_state->__pyx_n_s_device); - Py_CLEAR(clear_module_state->__pyx_n_s_dir); - Py_CLEAR(clear_module_state->__pyx_n_s_dirs); - Py_CLEAR(clear_module_state->__pyx_kp_u_disable); - Py_CLEAR(clear_module_state->__pyx_n_s_display); - Py_CLEAR(clear_module_state->__pyx_n_s_divisor); - Py_CLEAR(clear_module_state->__pyx_n_s_e); - Py_CLEAR(clear_module_state->__pyx_n_s_emojis); - Py_CLEAR(clear_module_state->__pyx_kp_u_enable); - Py_CLEAR(clear_module_state->__pyx_n_s_encode); - Py_CLEAR(clear_module_state->__pyx_n_u_end); - Py_CLEAR(clear_module_state->__pyx_n_s_enter); - Py_CLEAR(clear_module_state->__pyx_n_s_enumerate); - Py_CLEAR(clear_module_state->__pyx_n_s_env); - Py_CLEAR(clear_module_state->__pyx_n_s_env_var); - Py_CLEAR(clear_module_state->__pyx_n_s_environ); - Py_CLEAR(clear_module_state->__pyx_n_s_eps); - Py_CLEAR(clear_module_state->__pyx_n_s_exclude); - Py_CLEAR(clear_module_state->__pyx_n_s_exist_ok); - Py_CLEAR(clear_module_state->__pyx_n_s_exists); - Py_CLEAR(clear_module_state->__pyx_n_s_exit); - Py_CLEAR(clear_module_state->__pyx_n_s_f); - Py_CLEAR(clear_module_state->__pyx_n_s_file); - Py_CLEAR(clear_module_state->__pyx_n_s_file_2); - Py_CLEAR(clear_module_state->__pyx_n_s_file_size); - Py_CLEAR(clear_module_state->__pyx_n_s_file_size_locals_genexpr); - Py_CLEAR(clear_module_state->__pyx_n_s_fitness); - Py_CLEAR(clear_module_state->__pyx_n_s_float); - Py_CLEAR(clear_module_state->__pyx_n_u_float_kind); - Py_CLEAR(clear_module_state->__pyx_n_s_floor); - Py_CLEAR(clear_module_state->__pyx_n_s_format); - Py_CLEAR(clear_module_state->__pyx_n_s_formatter); - Py_CLEAR(clear_module_state->__pyx_n_s_from_numpy); - Py_CLEAR(clear_module_state->__pyx_n_s_func); - Py_CLEAR(clear_module_state->__pyx_n_s_gain); - Py_CLEAR(clear_module_state->__pyx_kp_u_gc); - Py_CLEAR(clear_module_state->__pyx_n_s_genexpr); - Py_CLEAR(clear_module_state->__pyx_n_s_get); - Py_CLEAR(clear_module_state->__pyx_n_s_getLogger); - Py_CLEAR(clear_module_state->__pyx_n_s_get_latest_run); - Py_CLEAR(clear_module_state->__pyx_n_s_get_terminal_size); - Py_CLEAR(clear_module_state->__pyx_n_s_getctime); - Py_CLEAR(clear_module_state->__pyx_n_s_getenv); - Py_CLEAR(clear_module_state->__pyx_n_s_glob); - Py_CLEAR(clear_module_state->__pyx_n_u_green); - Py_CLEAR(clear_module_state->__pyx_n_s_groups); - Py_CLEAR(clear_module_state->__pyx_n_s_h); - Py_CLEAR(clear_module_state->__pyx_n_s_handler); - Py_CLEAR(clear_module_state->__pyx_n_s_hard); - Py_CLEAR(clear_module_state->__pyx_n_s_home); - Py_CLEAR(clear_module_state->__pyx_n_s_i); - Py_CLEAR(clear_module_state->__pyx_n_u_ignore); - 
Py_CLEAR(clear_module_state->__pyx_n_s_image_weights); - Py_CLEAR(clear_module_state->__pyx_n_s_img0_shape); - Py_CLEAR(clear_module_state->__pyx_n_s_img1_shape); - Py_CLEAR(clear_module_state->__pyx_n_s_imgsz); - Py_CLEAR(clear_module_state->__pyx_n_s_import); - Py_CLEAR(clear_module_state->__pyx_n_s_increment_path); - Py_CLEAR(clear_module_state->__pyx_n_s_info); - Py_CLEAR(clear_module_state->__pyx_n_s_init_seeds); - Py_CLEAR(clear_module_state->__pyx_n_s_initializing); - Py_CLEAR(clear_module_state->__pyx_n_s_input); - Py_CLEAR(clear_module_state->__pyx_n_s_instance); - Py_CLEAR(clear_module_state->__pyx_n_s_int); - Py_CLEAR(clear_module_state->__pyx_n_s_intersect_dicts); - Py_CLEAR(clear_module_state->__pyx_n_s_intersect_dicts_locals_genexpr); - Py_CLEAR(clear_module_state->__pyx_n_s_iou); - Py_CLEAR(clear_module_state->__pyx_n_s_iou_thres); - Py_CLEAR(clear_module_state->__pyx_n_s_is_ascii); - Py_CLEAR(clear_module_state->__pyx_n_s_is_chinese); - Py_CLEAR(clear_module_state->__pyx_n_s_is_coroutine); - Py_CLEAR(clear_module_state->__pyx_n_s_is_dir); - Py_CLEAR(clear_module_state->__pyx_n_s_is_file); - Py_CLEAR(clear_module_state->__pyx_n_s_is_writeable); - Py_CLEAR(clear_module_state->__pyx_kp_u_isenabled); - Py_CLEAR(clear_module_state->__pyx_n_s_items); - Py_CLEAR(clear_module_state->__pyx_n_s_j); - Py_CLEAR(clear_module_state->__pyx_n_s_k); - Py_CLEAR(clear_module_state->__pyx_n_s_keepdim); - Py_CLEAR(clear_module_state->__pyx_n_s_key); - Py_CLEAR(clear_module_state->__pyx_n_s_kwargs); - Py_CLEAR(clear_module_state->__pyx_n_s_l); - Py_CLEAR(clear_module_state->__pyx_n_s_labels); - Py_CLEAR(clear_module_state->__pyx_n_s_labels_to_class_weights); - Py_CLEAR(clear_module_state->__pyx_n_s_labels_to_image_weights); - Py_CLEAR(clear_module_state->__pyx_n_s_last_list); - Py_CLEAR(clear_module_state->__pyx_kp_u_last_pt); - Py_CLEAR(clear_module_state->__pyx_n_s_level); - Py_CLEAR(clear_module_state->__pyx_n_s_linewidth); - Py_CLEAR(clear_module_state->__pyx_n_s_logging); - Py_CLEAR(clear_module_state->__pyx_n_s_long); - Py_CLEAR(clear_module_state->__pyx_n_u_long); - Py_CLEAR(clear_module_state->__pyx_n_s_lower); - Py_CLEAR(clear_module_state->__pyx_n_s_m); - Py_CLEAR(clear_module_state->__pyx_n_u_magenta); - Py_CLEAR(clear_module_state->__pyx_n_s_main); - Py_CLEAR(clear_module_state->__pyx_n_s_make_divisible); - Py_CLEAR(clear_module_state->__pyx_n_s_manual_seed); - Py_CLEAR(clear_module_state->__pyx_n_s_matches); - Py_CLEAR(clear_module_state->__pyx_n_s_math); - Py_CLEAR(clear_module_state->__pyx_n_s_max); - Py_CLEAR(clear_module_state->__pyx_n_s_max_columns); - Py_CLEAR(clear_module_state->__pyx_n_s_max_det); - Py_CLEAR(clear_module_state->__pyx_n_s_max_nms); - Py_CLEAR(clear_module_state->__pyx_n_s_max_wh); - Py_CLEAR(clear_module_state->__pyx_n_s_merge); - Py_CLEAR(clear_module_state->__pyx_kp_u_message_s); - Py_CLEAR(clear_module_state->__pyx_n_s_methods); - Py_CLEAR(clear_module_state->__pyx_n_s_min_wh); - Py_CLEAR(clear_module_state->__pyx_n_s_minimum); - Py_CLEAR(clear_module_state->__pyx_n_s_minlength); - Py_CLEAR(clear_module_state->__pyx_n_s_mkdir); - Py_CLEAR(clear_module_state->__pyx_n_s_mm); - Py_CLEAR(clear_module_state->__pyx_n_s_multi_label); - Py_CLEAR(clear_module_state->__pyx_kp_u_must_be_multiple_of_max_stride); - Py_CLEAR(clear_module_state->__pyx_n_s_n); - Py_CLEAR(clear_module_state->__pyx_n_s_name); - Py_CLEAR(clear_module_state->__pyx_n_s_name_2); - Py_CLEAR(clear_module_state->__pyx_n_s_nc); - Py_CLEAR(clear_module_state->__pyx_n_s_new_size); - 
Py_CLEAR(clear_module_state->__pyx_n_s_nms); - Py_CLEAR(clear_module_state->__pyx_n_s_non_max_suppression); - Py_CLEAR(clear_module_state->__pyx_n_s_nonzero); - Py_CLEAR(clear_module_state->__pyx_n_s_np); - Py_CLEAR(clear_module_state->__pyx_n_s_numpy); - Py_CLEAR(clear_module_state->__pyx_n_s_one_cycle); - Py_CLEAR(clear_module_state->__pyx_n_s_one_cycle_locals_lambda); - Py_CLEAR(clear_module_state->__pyx_n_s_ones); - Py_CLEAR(clear_module_state->__pyx_n_s_open); - Py_CLEAR(clear_module_state->__pyx_n_s_ops); - Py_CLEAR(clear_module_state->__pyx_n_s_opt); - Py_CLEAR(clear_module_state->__pyx_n_s_options); - Py_CLEAR(clear_module_state->__pyx_n_s_os); - Py_CLEAR(clear_module_state->__pyx_n_s_output); - Py_CLEAR(clear_module_state->__pyx_n_s_pad); - Py_CLEAR(clear_module_state->__pyx_n_s_padh); - Py_CLEAR(clear_module_state->__pyx_n_s_padw); - Py_CLEAR(clear_module_state->__pyx_n_s_pandas); - Py_CLEAR(clear_module_state->__pyx_n_s_parents); - Py_CLEAR(clear_module_state->__pyx_n_s_parse); - Py_CLEAR(clear_module_state->__pyx_n_s_path); - Py_CLEAR(clear_module_state->__pyx_n_s_pathlib); - Py_CLEAR(clear_module_state->__pyx_n_s_pattern); - Py_CLEAR(clear_module_state->__pyx_n_s_pd); - Py_CLEAR(clear_module_state->__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils); - Py_CLEAR(clear_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2); - Py_CLEAR(clear_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3); - Py_CLEAR(clear_module_state->__pyx_n_s_pi); - Py_CLEAR(clear_module_state->__pyx_n_s_pinned); - Py_CLEAR(clear_module_state->__pyx_n_s_platform); - Py_CLEAR(clear_module_state->__pyx_n_s_precision); - Py_CLEAR(clear_module_state->__pyx_n_s_prediction); - Py_CLEAR(clear_module_state->__pyx_n_s_print); - Py_CLEAR(clear_module_state->__pyx_n_s_print_args); - Py_CLEAR(clear_module_state->__pyx_n_s_print_args_locals_genexpr); - Py_CLEAR(clear_module_state->__pyx_n_s_profile); - Py_CLEAR(clear_module_state->__pyx_n_s_python_version); - Py_CLEAR(clear_module_state->__pyx_n_s_random); - Py_CLEAR(clear_module_state->__pyx_n_s_range); - Py_CLEAR(clear_module_state->__pyx_n_s_rank); - Py_CLEAR(clear_module_state->__pyx_n_s_ratio_pad); - Py_CLEAR(clear_module_state->__pyx_n_s_re); - Py_CLEAR(clear_module_state->__pyx_n_s_recursive); - Py_CLEAR(clear_module_state->__pyx_n_u_red); - Py_CLEAR(clear_module_state->__pyx_n_s_redundant); - Py_CLEAR(clear_module_state->__pyx_n_s_repl); - Py_CLEAR(clear_module_state->__pyx_n_s_replace); - Py_CLEAR(clear_module_state->__pyx_n_s_reshape); - Py_CLEAR(clear_module_state->__pyx_n_s_resolve); - Py_CLEAR(clear_module_state->__pyx_n_s_s); - Py_CLEAR(clear_module_state->__pyx_kp_u_s_2); - Py_CLEAR(clear_module_state->__pyx_kp_u_s_exceeded); - Py_CLEAR(clear_module_state->__pyx_n_s_scale_coords); - Py_CLEAR(clear_module_state->__pyx_n_s_scores); - Py_CLEAR(clear_module_state->__pyx_n_s_search); - Py_CLEAR(clear_module_state->__pyx_n_s_search_dir); - Py_CLEAR(clear_module_state->__pyx_n_s_seed); - Py_CLEAR(clear_module_state->__pyx_n_s_send); - Py_CLEAR(clear_module_state->__pyx_n_s_sep); - Py_CLEAR(clear_module_state->__pyx_n_s_setNumThreads); - Py_CLEAR(clear_module_state->__pyx_n_s_set_logging); - Py_CLEAR(clear_module_state->__pyx_n_s_set_printoptions); - Py_CLEAR(clear_module_state->__pyx_n_s_shape); - Py_CLEAR(clear_module_state->__pyx_n_s_shutil); - Py_CLEAR(clear_module_state->__pyx_n_s_spec); - Py_CLEAR(clear_module_state->__pyx_n_s_split); - Py_CLEAR(clear_module_state->__pyx_n_s_st_size); - Py_CLEAR(clear_module_state->__pyx_n_s_startswith); - 
Py_CLEAR(clear_module_state->__pyx_n_s_stat); - Py_CLEAR(clear_module_state->__pyx_n_s_stem); - Py_CLEAR(clear_module_state->__pyx_n_s_steps); - Py_CLEAR(clear_module_state->__pyx_n_s_str); - Py_CLEAR(clear_module_state->__pyx_n_s_string); - Py_CLEAR(clear_module_state->__pyx_n_s_sub); - Py_CLEAR(clear_module_state->__pyx_n_s_suffix); - Py_CLEAR(clear_module_state->__pyx_n_s_sum); - Py_CLEAR(clear_module_state->__pyx_n_s_system); - Py_CLEAR(clear_module_state->__pyx_n_s_t); - Py_CLEAR(clear_module_state->__pyx_n_s_tensor); - Py_CLEAR(clear_module_state->__pyx_n_s_test); - Py_CLEAR(clear_module_state->__pyx_n_s_test_2); - Py_CLEAR(clear_module_state->__pyx_n_s_throw); - Py_CLEAR(clear_module_state->__pyx_n_s_time); - Py_CLEAR(clear_module_state->__pyx_n_s_time_limit); - Py_CLEAR(clear_module_state->__pyx_kp_u_tmp); - Py_CLEAR(clear_module_state->__pyx_kp_u_tmp_txt); - Py_CLEAR(clear_module_state->__pyx_n_s_torch); - Py_CLEAR(clear_module_state->__pyx_n_s_torch_backends_cudnn); - Py_CLEAR(clear_module_state->__pyx_n_s_torchvision); - Py_CLEAR(clear_module_state->__pyx_n_u_true); - Py_CLEAR(clear_module_state->__pyx_n_s_try_except); - Py_CLEAR(clear_module_state->__pyx_n_s_try_except_locals_handler); - Py_CLEAR(clear_module_state->__pyx_n_u_underline); - Py_CLEAR(clear_module_state->__pyx_n_s_unlink); - Py_CLEAR(clear_module_state->__pyx_n_s_unquote); - Py_CLEAR(clear_module_state->__pyx_kp_u_updating_to); - Py_CLEAR(clear_module_state->__pyx_n_s_url); - Py_CLEAR(clear_module_state->__pyx_n_s_url2file); - Py_CLEAR(clear_module_state->__pyx_n_s_urllib); - Py_CLEAR(clear_module_state->__pyx_n_s_user_config_dir); - Py_CLEAR(clear_module_state->__pyx_n_s_v); - Py_CLEAR(clear_module_state->__pyx_kp_u_valid_values_are_between_0_0_an); - Py_CLEAR(clear_module_state->__pyx_n_s_vars); - Py_CLEAR(clear_module_state->__pyx_n_s_verbose); - Py_CLEAR(clear_module_state->__pyx_kp_u_version); - Py_CLEAR(clear_module_state->__pyx_n_s_view); - Py_CLEAR(clear_module_state->__pyx_n_s_w); - Py_CLEAR(clear_module_state->__pyx_n_u_w); - Py_CLEAR(clear_module_state->__pyx_n_s_warning); - Py_CLEAR(clear_module_state->__pyx_n_s_weights); - Py_CLEAR(clear_module_state->__pyx_n_u_white); - Py_CLEAR(clear_module_state->__pyx_n_s_with_suffix); - Py_CLEAR(clear_module_state->__pyx_n_s_x); - Py_CLEAR(clear_module_state->__pyx_n_s_xc); - Py_CLEAR(clear_module_state->__pyx_n_s_xi); - Py_CLEAR(clear_module_state->__pyx_n_s_xyn2xy); - Py_CLEAR(clear_module_state->__pyx_n_s_xywh2xyxy); - Py_CLEAR(clear_module_state->__pyx_n_s_xywhn2xyxy); - Py_CLEAR(clear_module_state->__pyx_n_s_xyxy2xywh); - Py_CLEAR(clear_module_state->__pyx_n_s_xyxy2xywhn); - Py_CLEAR(clear_module_state->__pyx_n_s_y); - Py_CLEAR(clear_module_state->__pyx_n_s_y1); - Py_CLEAR(clear_module_state->__pyx_n_s_y2); - Py_CLEAR(clear_module_state->__pyx_n_s_yaml); - Py_CLEAR(clear_module_state->__pyx_n_u_yellow); - Py_CLEAR(clear_module_state->__pyx_n_u_yolov5); - Py_CLEAR(clear_module_state->__pyx_n_s_zeros); - Py_CLEAR(clear_module_state->__pyx_float_0_0); - Py_CLEAR(clear_module_state->__pyx_float_1_0); - Py_CLEAR(clear_module_state->__pyx_float_1E6); - Py_CLEAR(clear_module_state->__pyx_float_3E3); - Py_CLEAR(clear_module_state->__pyx_float_0_25); - Py_CLEAR(clear_module_state->__pyx_float_0_45); - Py_CLEAR(clear_module_state->__pyx_int_0); - Py_CLEAR(clear_module_state->__pyx_int_1); - Py_CLEAR(clear_module_state->__pyx_int_2); - Py_CLEAR(clear_module_state->__pyx_int_3); - Py_CLEAR(clear_module_state->__pyx_int_4); - Py_CLEAR(clear_module_state->__pyx_int_5); - 
Py_CLEAR(clear_module_state->__pyx_int_6); - Py_CLEAR(clear_module_state->__pyx_int_10); - Py_CLEAR(clear_module_state->__pyx_int_32); - Py_CLEAR(clear_module_state->__pyx_int_80); - Py_CLEAR(clear_module_state->__pyx_int_100); - Py_CLEAR(clear_module_state->__pyx_int_300); - Py_CLEAR(clear_module_state->__pyx_int_320); - Py_CLEAR(clear_module_state->__pyx_int_640); - Py_CLEAR(clear_module_state->__pyx_int_neg_1); - Py_CLEAR(clear_module_state->__pyx_tuple_); - Py_CLEAR(clear_module_state->__pyx_tuple__2); - Py_CLEAR(clear_module_state->__pyx_tuple__8); - Py_CLEAR(clear_module_state->__pyx_slice__23); - Py_CLEAR(clear_module_state->__pyx_slice__28); - Py_CLEAR(clear_module_state->__pyx_slice__33); - Py_CLEAR(clear_module_state->__pyx_slice__36); - Py_CLEAR(clear_module_state->__pyx_slice__38); - Py_CLEAR(clear_module_state->__pyx_slice__42); - Py_CLEAR(clear_module_state->__pyx_tuple__12); - Py_CLEAR(clear_module_state->__pyx_tuple__13); - Py_CLEAR(clear_module_state->__pyx_tuple__19); - Py_CLEAR(clear_module_state->__pyx_tuple__24); - Py_CLEAR(clear_module_state->__pyx_tuple__25); - Py_CLEAR(clear_module_state->__pyx_tuple__26); - Py_CLEAR(clear_module_state->__pyx_tuple__27); - Py_CLEAR(clear_module_state->__pyx_tuple__29); - Py_CLEAR(clear_module_state->__pyx_tuple__30); - Py_CLEAR(clear_module_state->__pyx_tuple__31); - Py_CLEAR(clear_module_state->__pyx_tuple__32); - Py_CLEAR(clear_module_state->__pyx_tuple__34); - Py_CLEAR(clear_module_state->__pyx_tuple__35); - Py_CLEAR(clear_module_state->__pyx_tuple__37); - Py_CLEAR(clear_module_state->__pyx_tuple__39); - Py_CLEAR(clear_module_state->__pyx_tuple__40); - Py_CLEAR(clear_module_state->__pyx_tuple__41); - Py_CLEAR(clear_module_state->__pyx_tuple__43); - Py_CLEAR(clear_module_state->__pyx_tuple__44); - Py_CLEAR(clear_module_state->__pyx_tuple__45); - Py_CLEAR(clear_module_state->__pyx_tuple__46); - Py_CLEAR(clear_module_state->__pyx_tuple__48); - Py_CLEAR(clear_module_state->__pyx_tuple__49); - Py_CLEAR(clear_module_state->__pyx_tuple__51); - Py_CLEAR(clear_module_state->__pyx_tuple__53); - Py_CLEAR(clear_module_state->__pyx_tuple__55); - Py_CLEAR(clear_module_state->__pyx_tuple__57); - Py_CLEAR(clear_module_state->__pyx_tuple__58); - Py_CLEAR(clear_module_state->__pyx_tuple__60); - Py_CLEAR(clear_module_state->__pyx_tuple__61); - Py_CLEAR(clear_module_state->__pyx_tuple__63); - Py_CLEAR(clear_module_state->__pyx_tuple__64); - Py_CLEAR(clear_module_state->__pyx_tuple__66); - Py_CLEAR(clear_module_state->__pyx_tuple__67); - Py_CLEAR(clear_module_state->__pyx_tuple__69); - Py_CLEAR(clear_module_state->__pyx_tuple__70); - Py_CLEAR(clear_module_state->__pyx_tuple__72); - Py_CLEAR(clear_module_state->__pyx_tuple__74); - Py_CLEAR(clear_module_state->__pyx_tuple__75); - Py_CLEAR(clear_module_state->__pyx_tuple__77); - Py_CLEAR(clear_module_state->__pyx_tuple__78); - Py_CLEAR(clear_module_state->__pyx_tuple__80); - Py_CLEAR(clear_module_state->__pyx_tuple__82); - Py_CLEAR(clear_module_state->__pyx_tuple__83); - Py_CLEAR(clear_module_state->__pyx_tuple__85); - Py_CLEAR(clear_module_state->__pyx_tuple__86); - Py_CLEAR(clear_module_state->__pyx_tuple__88); - Py_CLEAR(clear_module_state->__pyx_tuple__89); - Py_CLEAR(clear_module_state->__pyx_tuple__91); - Py_CLEAR(clear_module_state->__pyx_tuple__94); - Py_CLEAR(clear_module_state->__pyx_tuple__96); - Py_CLEAR(clear_module_state->__pyx_tuple__97); - Py_CLEAR(clear_module_state->__pyx_tuple__99); - Py_CLEAR(clear_module_state->__pyx_codeobj__3); - Py_CLEAR(clear_module_state->__pyx_tuple__101); - 
Py_CLEAR(clear_module_state->__pyx_tuple__102); - Py_CLEAR(clear_module_state->__pyx_tuple__104); - Py_CLEAR(clear_module_state->__pyx_tuple__105); - Py_CLEAR(clear_module_state->__pyx_tuple__108); - Py_CLEAR(clear_module_state->__pyx_tuple__110); - Py_CLEAR(clear_module_state->__pyx_tuple__111); - Py_CLEAR(clear_module_state->__pyx_tuple__113); - Py_CLEAR(clear_module_state->__pyx_tuple__115); - Py_CLEAR(clear_module_state->__pyx_tuple__116); - Py_CLEAR(clear_module_state->__pyx_tuple__118); - Py_CLEAR(clear_module_state->__pyx_tuple__119); - Py_CLEAR(clear_module_state->__pyx_tuple__121); - Py_CLEAR(clear_module_state->__pyx_tuple__123); - Py_CLEAR(clear_module_state->__pyx_tuple__124); - Py_CLEAR(clear_module_state->__pyx_tuple__126); - Py_CLEAR(clear_module_state->__pyx_codeobj__47); - Py_CLEAR(clear_module_state->__pyx_codeobj__50); - Py_CLEAR(clear_module_state->__pyx_codeobj__52); - Py_CLEAR(clear_module_state->__pyx_codeobj__54); - Py_CLEAR(clear_module_state->__pyx_codeobj__56); - Py_CLEAR(clear_module_state->__pyx_codeobj__59); - Py_CLEAR(clear_module_state->__pyx_codeobj__62); - Py_CLEAR(clear_module_state->__pyx_codeobj__65); - Py_CLEAR(clear_module_state->__pyx_codeobj__68); - Py_CLEAR(clear_module_state->__pyx_codeobj__71); - Py_CLEAR(clear_module_state->__pyx_codeobj__73); - Py_CLEAR(clear_module_state->__pyx_codeobj__76); - Py_CLEAR(clear_module_state->__pyx_codeobj__79); - Py_CLEAR(clear_module_state->__pyx_codeobj__81); - Py_CLEAR(clear_module_state->__pyx_codeobj__84); - Py_CLEAR(clear_module_state->__pyx_codeobj__87); - Py_CLEAR(clear_module_state->__pyx_codeobj__90); - Py_CLEAR(clear_module_state->__pyx_codeobj__92); - Py_CLEAR(clear_module_state->__pyx_codeobj__93); - Py_CLEAR(clear_module_state->__pyx_codeobj__95); - Py_CLEAR(clear_module_state->__pyx_codeobj__98); - Py_CLEAR(clear_module_state->__pyx_codeobj__100); - Py_CLEAR(clear_module_state->__pyx_codeobj__103); - Py_CLEAR(clear_module_state->__pyx_codeobj__106); - Py_CLEAR(clear_module_state->__pyx_codeobj__107); - Py_CLEAR(clear_module_state->__pyx_codeobj__109); - Py_CLEAR(clear_module_state->__pyx_codeobj__112); - Py_CLEAR(clear_module_state->__pyx_codeobj__114); - Py_CLEAR(clear_module_state->__pyx_codeobj__117); - Py_CLEAR(clear_module_state->__pyx_codeobj__120); - Py_CLEAR(clear_module_state->__pyx_codeobj__122); - Py_CLEAR(clear_module_state->__pyx_codeobj__125); - return 0; -} -#endif -/* #### Code section: module_state_traverse ### */ -#if CYTHON_USE_MODULE_STATE -static int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) { - __pyx_mstate *traverse_module_state = __pyx_mstate(m); - if (!traverse_module_state) return 0; - Py_VISIT(traverse_module_state->__pyx_d); - Py_VISIT(traverse_module_state->__pyx_b); - Py_VISIT(traverse_module_state->__pyx_cython_runtime); - Py_VISIT(traverse_module_state->__pyx_empty_tuple); - Py_VISIT(traverse_module_state->__pyx_empty_bytes); - Py_VISIT(traverse_module_state->__pyx_empty_unicode); - #ifdef __Pyx_CyFunction_USED - Py_VISIT(traverse_module_state->__pyx_CyFunctionType); - #endif - #ifdef __Pyx_FusedFunction_USED - Py_VISIT(traverse_module_state->__pyx_FusedFunctionType); - #endif - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args); 
- Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr); - Py_VISIT(traverse_module_state->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr); - Py_VISIT(traverse_module_state->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr); - Py_VISIT(traverse_module_state->__pyx_kp_u_0_0_0); - Py_VISIT(traverse_module_state->__pyx_kp_u_0m); - Py_VISIT(traverse_module_state->__pyx_kp_u_11_5g); - Py_VISIT(traverse_module_state->__pyx_kp_u_1m); - Py_VISIT(traverse_module_state->__pyx_kp_u_30m); - Py_VISIT(traverse_module_state->__pyx_kp_u_31m); - Py_VISIT(traverse_module_state->__pyx_kp_u_32m); - Py_VISIT(traverse_module_state->__pyx_kp_u_33m); - Py_VISIT(traverse_module_state->__pyx_kp_u_34m); - Py_VISIT(traverse_module_state->__pyx_kp_u_35m); - Py_VISIT(traverse_module_state->__pyx_kp_u_36m); - Py_VISIT(traverse_module_state->__pyx_kp_u_37m); - Py_VISIT(traverse_module_state->__pyx_kp_u_3_6_2); - Py_VISIT(traverse_module_state->__pyx_kp_u_4m); - Py_VISIT(traverse_module_state->__pyx_kp_u_90m); - Py_VISIT(traverse_module_state->__pyx_kp_u_91m); - Py_VISIT(traverse_module_state->__pyx_kp_u_92m); - Py_VISIT(traverse_module_state->__pyx_kp_u_93m); - Py_VISIT(traverse_module_state->__pyx_kp_u_94m); - Py_VISIT(traverse_module_state->__pyx_kp_u_95m); - Py_VISIT(traverse_module_state->__pyx_kp_u_96m); - Py_VISIT(traverse_module_state->__pyx_kp_u_97m); - Py_VISIT(traverse_module_state->__pyx_kp_u_AppData_Roaming); - Py_VISIT(traverse_module_state->__pyx_n_s_AssertionError); - Py_VISIT(traverse_module_state->__pyx_n_u_Darwin); - Py_VISIT(traverse_module_state->__pyx_n_s_FILE); - Py_VISIT(traverse_module_state->__pyx_n_s_INFO); - 
Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_Confidence_threshold); - Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_IoU); - Py_VISIT(traverse_module_state->__pyx_n_s_LOGGER); - Py_VISIT(traverse_module_state->__pyx_kp_u_Library_Application_Support); - Py_VISIT(traverse_module_state->__pyx_n_u_Linux); - Py_VISIT(traverse_module_state->__pyx_n_s_NCOLS); - Py_VISIT(traverse_module_state->__pyx_n_u_NUMEXPR_MAX_THREADS); - Py_VISIT(traverse_module_state->__pyx_n_s_NUM_THREADS); - Py_VISIT(traverse_module_state->__pyx_n_s_OSError); - Py_VISIT(traverse_module_state->__pyx_n_s_Path); - Py_VISIT(traverse_module_state->__pyx_kp_u_Python); - Py_VISIT(traverse_module_state->__pyx_n_u_RANK); - Py_VISIT(traverse_module_state->__pyx_n_s_ROOT); - Py_VISIT(traverse_module_state->__pyx_n_s_R_OK); - Py_VISIT(traverse_module_state->__pyx_n_s_T); - Py_VISIT(traverse_module_state->__pyx_n_s_Tensor); - Py_VISIT(traverse_module_state->__pyx_n_u_Ultralytics); - Py_VISIT(traverse_module_state->__pyx_n_s_VERBOSE); - Py_VISIT(traverse_module_state->__pyx_n_s_WARNING); - Py_VISIT(traverse_module_state->__pyx_kp_u_WARNING_NMS_time_limit); - Py_VISIT(traverse_module_state->__pyx_kp_u_WARNING_img_size); - Py_VISIT(traverse_module_state->__pyx_n_u_Windows); - Py_VISIT(traverse_module_state->__pyx_n_u_YOLOV5_CONFIG_DIR); - Py_VISIT(traverse_module_state->__pyx_n_u_YOLOv5_VERBOSE); - Py_VISIT(traverse_module_state->__pyx_kp_u__10); - Py_VISIT(traverse_module_state->__pyx_kp_u__11); - Py_VISIT(traverse_module_state->__pyx_n_u__14); - Py_VISIT(traverse_module_state->__pyx_kp_u__15); - Py_VISIT(traverse_module_state->__pyx_kp_u__16); - Py_VISIT(traverse_module_state->__pyx_kp_u__17); - Py_VISIT(traverse_module_state->__pyx_kp_u__18); - Py_VISIT(traverse_module_state->__pyx_n_s__20); - Py_VISIT(traverse_module_state->__pyx_kp_u__20); - Py_VISIT(traverse_module_state->__pyx_kp_u__21); - Py_VISIT(traverse_module_state->__pyx_n_u__22); - Py_VISIT(traverse_module_state->__pyx_n_u__4); - Py_VISIT(traverse_module_state->__pyx_kp_u__5); - Py_VISIT(traverse_module_state->__pyx_kp_u__6); - Py_VISIT(traverse_module_state->__pyx_kp_u__7); - Py_VISIT(traverse_module_state->__pyx_n_s__9); - Py_VISIT(traverse_module_state->__pyx_kp_u__9); - Py_VISIT(traverse_module_state->__pyx_n_s_access); - Py_VISIT(traverse_module_state->__pyx_n_s_agnostic); - Py_VISIT(traverse_module_state->__pyx_n_s_any); - Py_VISIT(traverse_module_state->__pyx_n_s_args); - Py_VISIT(traverse_module_state->__pyx_n_s_argsort); - Py_VISIT(traverse_module_state->__pyx_n_s_array); - Py_VISIT(traverse_module_state->__pyx_n_s_as_tuple); - Py_VISIT(traverse_module_state->__pyx_n_u_ascii); - Py_VISIT(traverse_module_state->__pyx_n_s_astype); - Py_VISIT(traverse_module_state->__pyx_n_s_asyncio_coroutines); - Py_VISIT(traverse_module_state->__pyx_n_s_backends); - Py_VISIT(traverse_module_state->__pyx_n_s_basicConfig); - Py_VISIT(traverse_module_state->__pyx_n_s_benchmark); - Py_VISIT(traverse_module_state->__pyx_n_s_bincount); - Py_VISIT(traverse_module_state->__pyx_n_u_black); - Py_VISIT(traverse_module_state->__pyx_n_u_blue); - Py_VISIT(traverse_module_state->__pyx_n_u_bold); - Py_VISIT(traverse_module_state->__pyx_n_s_box); - Py_VISIT(traverse_module_state->__pyx_n_s_box_iou); - Py_VISIT(traverse_module_state->__pyx_n_s_boxes); - Py_VISIT(traverse_module_state->__pyx_n_u_bright_black); - Py_VISIT(traverse_module_state->__pyx_n_u_bright_blue); - Py_VISIT(traverse_module_state->__pyx_n_u_bright_cyan); - Py_VISIT(traverse_module_state->__pyx_n_u_bright_green); - 
Py_VISIT(traverse_module_state->__pyx_n_u_bright_magenta); - Py_VISIT(traverse_module_state->__pyx_n_u_bright_red); - Py_VISIT(traverse_module_state->__pyx_n_u_bright_white); - Py_VISIT(traverse_module_state->__pyx_n_u_bright_yellow); - Py_VISIT(traverse_module_state->__pyx_n_s_c); - Py_VISIT(traverse_module_state->__pyx_n_s_cat); - Py_VISIT(traverse_module_state->__pyx_n_s_ceil); - Py_VISIT(traverse_module_state->__pyx_n_s_cfg); - Py_VISIT(traverse_module_state->__pyx_n_s_check_img_size); - Py_VISIT(traverse_module_state->__pyx_n_s_check_python); - Py_VISIT(traverse_module_state->__pyx_n_s_check_version); - Py_VISIT(traverse_module_state->__pyx_n_s_clamp); - Py_VISIT(traverse_module_state->__pyx_n_s_class_counts); - Py_VISIT(traverse_module_state->__pyx_n_s_class_getitem); - Py_VISIT(traverse_module_state->__pyx_n_s_class_weights); - Py_VISIT(traverse_module_state->__pyx_n_s_classes); - Py_VISIT(traverse_module_state->__pyx_n_s_clean_str); - Py_VISIT(traverse_module_state->__pyx_n_s_cline_in_traceback); - Py_VISIT(traverse_module_state->__pyx_n_s_clip); - Py_VISIT(traverse_module_state->__pyx_n_s_clip_coords); - Py_VISIT(traverse_module_state->__pyx_n_s_clone); - Py_VISIT(traverse_module_state->__pyx_n_s_close); - Py_VISIT(traverse_module_state->__pyx_n_s_colors); - Py_VISIT(traverse_module_state->__pyx_n_s_colorstr); - Py_VISIT(traverse_module_state->__pyx_n_s_colorstr_locals_genexpr); - Py_VISIT(traverse_module_state->__pyx_n_s_columns); - Py_VISIT(traverse_module_state->__pyx_n_s_concatenate); - Py_VISIT(traverse_module_state->__pyx_n_s_conf); - Py_VISIT(traverse_module_state->__pyx_n_s_conf_thres); - Py_VISIT(traverse_module_state->__pyx_kp_u_config); - Py_VISIT(traverse_module_state->__pyx_n_s_coords); - Py_VISIT(traverse_module_state->__pyx_n_s_copy); - Py_VISIT(traverse_module_state->__pyx_n_s_cos); - Py_VISIT(traverse_module_state->__pyx_n_s_cpu_count); - Py_VISIT(traverse_module_state->__pyx_n_s_cudnn); - Py_VISIT(traverse_module_state->__pyx_n_s_current); - Py_VISIT(traverse_module_state->__pyx_n_s_cv2); - Py_VISIT(traverse_module_state->__pyx_n_u_cyan); - Py_VISIT(traverse_module_state->__pyx_kp_u_d); - Py_VISIT(traverse_module_state->__pyx_n_s_d_2); - Py_VISIT(traverse_module_state->__pyx_n_s_da); - Py_VISIT(traverse_module_state->__pyx_n_s_db); - Py_VISIT(traverse_module_state->__pyx_n_s_decode); - Py_VISIT(traverse_module_state->__pyx_n_s_descending); - Py_VISIT(traverse_module_state->__pyx_n_s_deterministic); - Py_VISIT(traverse_module_state->__pyx_n_s_device); - Py_VISIT(traverse_module_state->__pyx_n_s_dir); - Py_VISIT(traverse_module_state->__pyx_n_s_dirs); - Py_VISIT(traverse_module_state->__pyx_kp_u_disable); - Py_VISIT(traverse_module_state->__pyx_n_s_display); - Py_VISIT(traverse_module_state->__pyx_n_s_divisor); - Py_VISIT(traverse_module_state->__pyx_n_s_e); - Py_VISIT(traverse_module_state->__pyx_n_s_emojis); - Py_VISIT(traverse_module_state->__pyx_kp_u_enable); - Py_VISIT(traverse_module_state->__pyx_n_s_encode); - Py_VISIT(traverse_module_state->__pyx_n_u_end); - Py_VISIT(traverse_module_state->__pyx_n_s_enter); - Py_VISIT(traverse_module_state->__pyx_n_s_enumerate); - Py_VISIT(traverse_module_state->__pyx_n_s_env); - Py_VISIT(traverse_module_state->__pyx_n_s_env_var); - Py_VISIT(traverse_module_state->__pyx_n_s_environ); - Py_VISIT(traverse_module_state->__pyx_n_s_eps); - Py_VISIT(traverse_module_state->__pyx_n_s_exclude); - Py_VISIT(traverse_module_state->__pyx_n_s_exist_ok); - Py_VISIT(traverse_module_state->__pyx_n_s_exists); - 
Py_VISIT(traverse_module_state->__pyx_n_s_exit); - Py_VISIT(traverse_module_state->__pyx_n_s_f); - Py_VISIT(traverse_module_state->__pyx_n_s_file); - Py_VISIT(traverse_module_state->__pyx_n_s_file_2); - Py_VISIT(traverse_module_state->__pyx_n_s_file_size); - Py_VISIT(traverse_module_state->__pyx_n_s_file_size_locals_genexpr); - Py_VISIT(traverse_module_state->__pyx_n_s_fitness); - Py_VISIT(traverse_module_state->__pyx_n_s_float); - Py_VISIT(traverse_module_state->__pyx_n_u_float_kind); - Py_VISIT(traverse_module_state->__pyx_n_s_floor); - Py_VISIT(traverse_module_state->__pyx_n_s_format); - Py_VISIT(traverse_module_state->__pyx_n_s_formatter); - Py_VISIT(traverse_module_state->__pyx_n_s_from_numpy); - Py_VISIT(traverse_module_state->__pyx_n_s_func); - Py_VISIT(traverse_module_state->__pyx_n_s_gain); - Py_VISIT(traverse_module_state->__pyx_kp_u_gc); - Py_VISIT(traverse_module_state->__pyx_n_s_genexpr); - Py_VISIT(traverse_module_state->__pyx_n_s_get); - Py_VISIT(traverse_module_state->__pyx_n_s_getLogger); - Py_VISIT(traverse_module_state->__pyx_n_s_get_latest_run); - Py_VISIT(traverse_module_state->__pyx_n_s_get_terminal_size); - Py_VISIT(traverse_module_state->__pyx_n_s_getctime); - Py_VISIT(traverse_module_state->__pyx_n_s_getenv); - Py_VISIT(traverse_module_state->__pyx_n_s_glob); - Py_VISIT(traverse_module_state->__pyx_n_u_green); - Py_VISIT(traverse_module_state->__pyx_n_s_groups); - Py_VISIT(traverse_module_state->__pyx_n_s_h); - Py_VISIT(traverse_module_state->__pyx_n_s_handler); - Py_VISIT(traverse_module_state->__pyx_n_s_hard); - Py_VISIT(traverse_module_state->__pyx_n_s_home); - Py_VISIT(traverse_module_state->__pyx_n_s_i); - Py_VISIT(traverse_module_state->__pyx_n_u_ignore); - Py_VISIT(traverse_module_state->__pyx_n_s_image_weights); - Py_VISIT(traverse_module_state->__pyx_n_s_img0_shape); - Py_VISIT(traverse_module_state->__pyx_n_s_img1_shape); - Py_VISIT(traverse_module_state->__pyx_n_s_imgsz); - Py_VISIT(traverse_module_state->__pyx_n_s_import); - Py_VISIT(traverse_module_state->__pyx_n_s_increment_path); - Py_VISIT(traverse_module_state->__pyx_n_s_info); - Py_VISIT(traverse_module_state->__pyx_n_s_init_seeds); - Py_VISIT(traverse_module_state->__pyx_n_s_initializing); - Py_VISIT(traverse_module_state->__pyx_n_s_input); - Py_VISIT(traverse_module_state->__pyx_n_s_instance); - Py_VISIT(traverse_module_state->__pyx_n_s_int); - Py_VISIT(traverse_module_state->__pyx_n_s_intersect_dicts); - Py_VISIT(traverse_module_state->__pyx_n_s_intersect_dicts_locals_genexpr); - Py_VISIT(traverse_module_state->__pyx_n_s_iou); - Py_VISIT(traverse_module_state->__pyx_n_s_iou_thres); - Py_VISIT(traverse_module_state->__pyx_n_s_is_ascii); - Py_VISIT(traverse_module_state->__pyx_n_s_is_chinese); - Py_VISIT(traverse_module_state->__pyx_n_s_is_coroutine); - Py_VISIT(traverse_module_state->__pyx_n_s_is_dir); - Py_VISIT(traverse_module_state->__pyx_n_s_is_file); - Py_VISIT(traverse_module_state->__pyx_n_s_is_writeable); - Py_VISIT(traverse_module_state->__pyx_kp_u_isenabled); - Py_VISIT(traverse_module_state->__pyx_n_s_items); - Py_VISIT(traverse_module_state->__pyx_n_s_j); - Py_VISIT(traverse_module_state->__pyx_n_s_k); - Py_VISIT(traverse_module_state->__pyx_n_s_keepdim); - Py_VISIT(traverse_module_state->__pyx_n_s_key); - Py_VISIT(traverse_module_state->__pyx_n_s_kwargs); - Py_VISIT(traverse_module_state->__pyx_n_s_l); - Py_VISIT(traverse_module_state->__pyx_n_s_labels); - Py_VISIT(traverse_module_state->__pyx_n_s_labels_to_class_weights); - 
Py_VISIT(traverse_module_state->__pyx_n_s_labels_to_image_weights); - Py_VISIT(traverse_module_state->__pyx_n_s_last_list); - Py_VISIT(traverse_module_state->__pyx_kp_u_last_pt); - Py_VISIT(traverse_module_state->__pyx_n_s_level); - Py_VISIT(traverse_module_state->__pyx_n_s_linewidth); - Py_VISIT(traverse_module_state->__pyx_n_s_logging); - Py_VISIT(traverse_module_state->__pyx_n_s_long); - Py_VISIT(traverse_module_state->__pyx_n_u_long); - Py_VISIT(traverse_module_state->__pyx_n_s_lower); - Py_VISIT(traverse_module_state->__pyx_n_s_m); - Py_VISIT(traverse_module_state->__pyx_n_u_magenta); - Py_VISIT(traverse_module_state->__pyx_n_s_main); - Py_VISIT(traverse_module_state->__pyx_n_s_make_divisible); - Py_VISIT(traverse_module_state->__pyx_n_s_manual_seed); - Py_VISIT(traverse_module_state->__pyx_n_s_matches); - Py_VISIT(traverse_module_state->__pyx_n_s_math); - Py_VISIT(traverse_module_state->__pyx_n_s_max); - Py_VISIT(traverse_module_state->__pyx_n_s_max_columns); - Py_VISIT(traverse_module_state->__pyx_n_s_max_det); - Py_VISIT(traverse_module_state->__pyx_n_s_max_nms); - Py_VISIT(traverse_module_state->__pyx_n_s_max_wh); - Py_VISIT(traverse_module_state->__pyx_n_s_merge); - Py_VISIT(traverse_module_state->__pyx_kp_u_message_s); - Py_VISIT(traverse_module_state->__pyx_n_s_methods); - Py_VISIT(traverse_module_state->__pyx_n_s_min_wh); - Py_VISIT(traverse_module_state->__pyx_n_s_minimum); - Py_VISIT(traverse_module_state->__pyx_n_s_minlength); - Py_VISIT(traverse_module_state->__pyx_n_s_mkdir); - Py_VISIT(traverse_module_state->__pyx_n_s_mm); - Py_VISIT(traverse_module_state->__pyx_n_s_multi_label); - Py_VISIT(traverse_module_state->__pyx_kp_u_must_be_multiple_of_max_stride); - Py_VISIT(traverse_module_state->__pyx_n_s_n); - Py_VISIT(traverse_module_state->__pyx_n_s_name); - Py_VISIT(traverse_module_state->__pyx_n_s_name_2); - Py_VISIT(traverse_module_state->__pyx_n_s_nc); - Py_VISIT(traverse_module_state->__pyx_n_s_new_size); - Py_VISIT(traverse_module_state->__pyx_n_s_nms); - Py_VISIT(traverse_module_state->__pyx_n_s_non_max_suppression); - Py_VISIT(traverse_module_state->__pyx_n_s_nonzero); - Py_VISIT(traverse_module_state->__pyx_n_s_np); - Py_VISIT(traverse_module_state->__pyx_n_s_numpy); - Py_VISIT(traverse_module_state->__pyx_n_s_one_cycle); - Py_VISIT(traverse_module_state->__pyx_n_s_one_cycle_locals_lambda); - Py_VISIT(traverse_module_state->__pyx_n_s_ones); - Py_VISIT(traverse_module_state->__pyx_n_s_open); - Py_VISIT(traverse_module_state->__pyx_n_s_ops); - Py_VISIT(traverse_module_state->__pyx_n_s_opt); - Py_VISIT(traverse_module_state->__pyx_n_s_options); - Py_VISIT(traverse_module_state->__pyx_n_s_os); - Py_VISIT(traverse_module_state->__pyx_n_s_output); - Py_VISIT(traverse_module_state->__pyx_n_s_pad); - Py_VISIT(traverse_module_state->__pyx_n_s_padh); - Py_VISIT(traverse_module_state->__pyx_n_s_padw); - Py_VISIT(traverse_module_state->__pyx_n_s_pandas); - Py_VISIT(traverse_module_state->__pyx_n_s_parents); - Py_VISIT(traverse_module_state->__pyx_n_s_parse); - Py_VISIT(traverse_module_state->__pyx_n_s_path); - Py_VISIT(traverse_module_state->__pyx_n_s_pathlib); - Py_VISIT(traverse_module_state->__pyx_n_s_pattern); - Py_VISIT(traverse_module_state->__pyx_n_s_pd); - Py_VISIT(traverse_module_state->__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils); - Py_VISIT(traverse_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2); - Py_VISIT(traverse_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3); - Py_VISIT(traverse_module_state->__pyx_n_s_pi); - 
Py_VISIT(traverse_module_state->__pyx_n_s_pinned); - Py_VISIT(traverse_module_state->__pyx_n_s_platform); - Py_VISIT(traverse_module_state->__pyx_n_s_precision); - Py_VISIT(traverse_module_state->__pyx_n_s_prediction); - Py_VISIT(traverse_module_state->__pyx_n_s_print); - Py_VISIT(traverse_module_state->__pyx_n_s_print_args); - Py_VISIT(traverse_module_state->__pyx_n_s_print_args_locals_genexpr); - Py_VISIT(traverse_module_state->__pyx_n_s_profile); - Py_VISIT(traverse_module_state->__pyx_n_s_python_version); - Py_VISIT(traverse_module_state->__pyx_n_s_random); - Py_VISIT(traverse_module_state->__pyx_n_s_range); - Py_VISIT(traverse_module_state->__pyx_n_s_rank); - Py_VISIT(traverse_module_state->__pyx_n_s_ratio_pad); - Py_VISIT(traverse_module_state->__pyx_n_s_re); - Py_VISIT(traverse_module_state->__pyx_n_s_recursive); - Py_VISIT(traverse_module_state->__pyx_n_u_red); - Py_VISIT(traverse_module_state->__pyx_n_s_redundant); - Py_VISIT(traverse_module_state->__pyx_n_s_repl); - Py_VISIT(traverse_module_state->__pyx_n_s_replace); - Py_VISIT(traverse_module_state->__pyx_n_s_reshape); - Py_VISIT(traverse_module_state->__pyx_n_s_resolve); - Py_VISIT(traverse_module_state->__pyx_n_s_s); - Py_VISIT(traverse_module_state->__pyx_kp_u_s_2); - Py_VISIT(traverse_module_state->__pyx_kp_u_s_exceeded); - Py_VISIT(traverse_module_state->__pyx_n_s_scale_coords); - Py_VISIT(traverse_module_state->__pyx_n_s_scores); - Py_VISIT(traverse_module_state->__pyx_n_s_search); - Py_VISIT(traverse_module_state->__pyx_n_s_search_dir); - Py_VISIT(traverse_module_state->__pyx_n_s_seed); - Py_VISIT(traverse_module_state->__pyx_n_s_send); - Py_VISIT(traverse_module_state->__pyx_n_s_sep); - Py_VISIT(traverse_module_state->__pyx_n_s_setNumThreads); - Py_VISIT(traverse_module_state->__pyx_n_s_set_logging); - Py_VISIT(traverse_module_state->__pyx_n_s_set_printoptions); - Py_VISIT(traverse_module_state->__pyx_n_s_shape); - Py_VISIT(traverse_module_state->__pyx_n_s_shutil); - Py_VISIT(traverse_module_state->__pyx_n_s_spec); - Py_VISIT(traverse_module_state->__pyx_n_s_split); - Py_VISIT(traverse_module_state->__pyx_n_s_st_size); - Py_VISIT(traverse_module_state->__pyx_n_s_startswith); - Py_VISIT(traverse_module_state->__pyx_n_s_stat); - Py_VISIT(traverse_module_state->__pyx_n_s_stem); - Py_VISIT(traverse_module_state->__pyx_n_s_steps); - Py_VISIT(traverse_module_state->__pyx_n_s_str); - Py_VISIT(traverse_module_state->__pyx_n_s_string); - Py_VISIT(traverse_module_state->__pyx_n_s_sub); - Py_VISIT(traverse_module_state->__pyx_n_s_suffix); - Py_VISIT(traverse_module_state->__pyx_n_s_sum); - Py_VISIT(traverse_module_state->__pyx_n_s_system); - Py_VISIT(traverse_module_state->__pyx_n_s_t); - Py_VISIT(traverse_module_state->__pyx_n_s_tensor); - Py_VISIT(traverse_module_state->__pyx_n_s_test); - Py_VISIT(traverse_module_state->__pyx_n_s_test_2); - Py_VISIT(traverse_module_state->__pyx_n_s_throw); - Py_VISIT(traverse_module_state->__pyx_n_s_time); - Py_VISIT(traverse_module_state->__pyx_n_s_time_limit); - Py_VISIT(traverse_module_state->__pyx_kp_u_tmp); - Py_VISIT(traverse_module_state->__pyx_kp_u_tmp_txt); - Py_VISIT(traverse_module_state->__pyx_n_s_torch); - Py_VISIT(traverse_module_state->__pyx_n_s_torch_backends_cudnn); - Py_VISIT(traverse_module_state->__pyx_n_s_torchvision); - Py_VISIT(traverse_module_state->__pyx_n_u_true); - Py_VISIT(traverse_module_state->__pyx_n_s_try_except); - Py_VISIT(traverse_module_state->__pyx_n_s_try_except_locals_handler); - Py_VISIT(traverse_module_state->__pyx_n_u_underline); - 
Py_VISIT(traverse_module_state->__pyx_n_s_unlink); - Py_VISIT(traverse_module_state->__pyx_n_s_unquote); - Py_VISIT(traverse_module_state->__pyx_kp_u_updating_to); - Py_VISIT(traverse_module_state->__pyx_n_s_url); - Py_VISIT(traverse_module_state->__pyx_n_s_url2file); - Py_VISIT(traverse_module_state->__pyx_n_s_urllib); - Py_VISIT(traverse_module_state->__pyx_n_s_user_config_dir); - Py_VISIT(traverse_module_state->__pyx_n_s_v); - Py_VISIT(traverse_module_state->__pyx_kp_u_valid_values_are_between_0_0_an); - Py_VISIT(traverse_module_state->__pyx_n_s_vars); - Py_VISIT(traverse_module_state->__pyx_n_s_verbose); - Py_VISIT(traverse_module_state->__pyx_kp_u_version); - Py_VISIT(traverse_module_state->__pyx_n_s_view); - Py_VISIT(traverse_module_state->__pyx_n_s_w); - Py_VISIT(traverse_module_state->__pyx_n_u_w); - Py_VISIT(traverse_module_state->__pyx_n_s_warning); - Py_VISIT(traverse_module_state->__pyx_n_s_weights); - Py_VISIT(traverse_module_state->__pyx_n_u_white); - Py_VISIT(traverse_module_state->__pyx_n_s_with_suffix); - Py_VISIT(traverse_module_state->__pyx_n_s_x); - Py_VISIT(traverse_module_state->__pyx_n_s_xc); - Py_VISIT(traverse_module_state->__pyx_n_s_xi); - Py_VISIT(traverse_module_state->__pyx_n_s_xyn2xy); - Py_VISIT(traverse_module_state->__pyx_n_s_xywh2xyxy); - Py_VISIT(traverse_module_state->__pyx_n_s_xywhn2xyxy); - Py_VISIT(traverse_module_state->__pyx_n_s_xyxy2xywh); - Py_VISIT(traverse_module_state->__pyx_n_s_xyxy2xywhn); - Py_VISIT(traverse_module_state->__pyx_n_s_y); - Py_VISIT(traverse_module_state->__pyx_n_s_y1); - Py_VISIT(traverse_module_state->__pyx_n_s_y2); - Py_VISIT(traverse_module_state->__pyx_n_s_yaml); - Py_VISIT(traverse_module_state->__pyx_n_u_yellow); - Py_VISIT(traverse_module_state->__pyx_n_u_yolov5); - Py_VISIT(traverse_module_state->__pyx_n_s_zeros); - Py_VISIT(traverse_module_state->__pyx_float_0_0); - Py_VISIT(traverse_module_state->__pyx_float_1_0); - Py_VISIT(traverse_module_state->__pyx_float_1E6); - Py_VISIT(traverse_module_state->__pyx_float_3E3); - Py_VISIT(traverse_module_state->__pyx_float_0_25); - Py_VISIT(traverse_module_state->__pyx_float_0_45); - Py_VISIT(traverse_module_state->__pyx_int_0); - Py_VISIT(traverse_module_state->__pyx_int_1); - Py_VISIT(traverse_module_state->__pyx_int_2); - Py_VISIT(traverse_module_state->__pyx_int_3); - Py_VISIT(traverse_module_state->__pyx_int_4); - Py_VISIT(traverse_module_state->__pyx_int_5); - Py_VISIT(traverse_module_state->__pyx_int_6); - Py_VISIT(traverse_module_state->__pyx_int_10); - Py_VISIT(traverse_module_state->__pyx_int_32); - Py_VISIT(traverse_module_state->__pyx_int_80); - Py_VISIT(traverse_module_state->__pyx_int_100); - Py_VISIT(traverse_module_state->__pyx_int_300); - Py_VISIT(traverse_module_state->__pyx_int_320); - Py_VISIT(traverse_module_state->__pyx_int_640); - Py_VISIT(traverse_module_state->__pyx_int_neg_1); - Py_VISIT(traverse_module_state->__pyx_tuple_); - Py_VISIT(traverse_module_state->__pyx_tuple__2); - Py_VISIT(traverse_module_state->__pyx_tuple__8); - Py_VISIT(traverse_module_state->__pyx_slice__23); - Py_VISIT(traverse_module_state->__pyx_slice__28); - Py_VISIT(traverse_module_state->__pyx_slice__33); - Py_VISIT(traverse_module_state->__pyx_slice__36); - Py_VISIT(traverse_module_state->__pyx_slice__38); - Py_VISIT(traverse_module_state->__pyx_slice__42); - Py_VISIT(traverse_module_state->__pyx_tuple__12); - Py_VISIT(traverse_module_state->__pyx_tuple__13); - Py_VISIT(traverse_module_state->__pyx_tuple__19); - Py_VISIT(traverse_module_state->__pyx_tuple__24); - 
Py_VISIT(traverse_module_state->__pyx_tuple__25); - Py_VISIT(traverse_module_state->__pyx_tuple__26); - Py_VISIT(traverse_module_state->__pyx_tuple__27); - Py_VISIT(traverse_module_state->__pyx_tuple__29); - Py_VISIT(traverse_module_state->__pyx_tuple__30); - Py_VISIT(traverse_module_state->__pyx_tuple__31); - Py_VISIT(traverse_module_state->__pyx_tuple__32); - Py_VISIT(traverse_module_state->__pyx_tuple__34); - Py_VISIT(traverse_module_state->__pyx_tuple__35); - Py_VISIT(traverse_module_state->__pyx_tuple__37); - Py_VISIT(traverse_module_state->__pyx_tuple__39); - Py_VISIT(traverse_module_state->__pyx_tuple__40); - Py_VISIT(traverse_module_state->__pyx_tuple__41); - Py_VISIT(traverse_module_state->__pyx_tuple__43); - Py_VISIT(traverse_module_state->__pyx_tuple__44); - Py_VISIT(traverse_module_state->__pyx_tuple__45); - Py_VISIT(traverse_module_state->__pyx_tuple__46); - Py_VISIT(traverse_module_state->__pyx_tuple__48); - Py_VISIT(traverse_module_state->__pyx_tuple__49); - Py_VISIT(traverse_module_state->__pyx_tuple__51); - Py_VISIT(traverse_module_state->__pyx_tuple__53); - Py_VISIT(traverse_module_state->__pyx_tuple__55); - Py_VISIT(traverse_module_state->__pyx_tuple__57); - Py_VISIT(traverse_module_state->__pyx_tuple__58); - Py_VISIT(traverse_module_state->__pyx_tuple__60); - Py_VISIT(traverse_module_state->__pyx_tuple__61); - Py_VISIT(traverse_module_state->__pyx_tuple__63); - Py_VISIT(traverse_module_state->__pyx_tuple__64); - Py_VISIT(traverse_module_state->__pyx_tuple__66); - Py_VISIT(traverse_module_state->__pyx_tuple__67); - Py_VISIT(traverse_module_state->__pyx_tuple__69); - Py_VISIT(traverse_module_state->__pyx_tuple__70); - Py_VISIT(traverse_module_state->__pyx_tuple__72); - Py_VISIT(traverse_module_state->__pyx_tuple__74); - Py_VISIT(traverse_module_state->__pyx_tuple__75); - Py_VISIT(traverse_module_state->__pyx_tuple__77); - Py_VISIT(traverse_module_state->__pyx_tuple__78); - Py_VISIT(traverse_module_state->__pyx_tuple__80); - Py_VISIT(traverse_module_state->__pyx_tuple__82); - Py_VISIT(traverse_module_state->__pyx_tuple__83); - Py_VISIT(traverse_module_state->__pyx_tuple__85); - Py_VISIT(traverse_module_state->__pyx_tuple__86); - Py_VISIT(traverse_module_state->__pyx_tuple__88); - Py_VISIT(traverse_module_state->__pyx_tuple__89); - Py_VISIT(traverse_module_state->__pyx_tuple__91); - Py_VISIT(traverse_module_state->__pyx_tuple__94); - Py_VISIT(traverse_module_state->__pyx_tuple__96); - Py_VISIT(traverse_module_state->__pyx_tuple__97); - Py_VISIT(traverse_module_state->__pyx_tuple__99); - Py_VISIT(traverse_module_state->__pyx_codeobj__3); - Py_VISIT(traverse_module_state->__pyx_tuple__101); - Py_VISIT(traverse_module_state->__pyx_tuple__102); - Py_VISIT(traverse_module_state->__pyx_tuple__104); - Py_VISIT(traverse_module_state->__pyx_tuple__105); - Py_VISIT(traverse_module_state->__pyx_tuple__108); - Py_VISIT(traverse_module_state->__pyx_tuple__110); - Py_VISIT(traverse_module_state->__pyx_tuple__111); - Py_VISIT(traverse_module_state->__pyx_tuple__113); - Py_VISIT(traverse_module_state->__pyx_tuple__115); - Py_VISIT(traverse_module_state->__pyx_tuple__116); - Py_VISIT(traverse_module_state->__pyx_tuple__118); - Py_VISIT(traverse_module_state->__pyx_tuple__119); - Py_VISIT(traverse_module_state->__pyx_tuple__121); - Py_VISIT(traverse_module_state->__pyx_tuple__123); - Py_VISIT(traverse_module_state->__pyx_tuple__124); - Py_VISIT(traverse_module_state->__pyx_tuple__126); - Py_VISIT(traverse_module_state->__pyx_codeobj__47); - Py_VISIT(traverse_module_state->__pyx_codeobj__50); 
- Py_VISIT(traverse_module_state->__pyx_codeobj__52); - Py_VISIT(traverse_module_state->__pyx_codeobj__54); - Py_VISIT(traverse_module_state->__pyx_codeobj__56); - Py_VISIT(traverse_module_state->__pyx_codeobj__59); - Py_VISIT(traverse_module_state->__pyx_codeobj__62); - Py_VISIT(traverse_module_state->__pyx_codeobj__65); - Py_VISIT(traverse_module_state->__pyx_codeobj__68); - Py_VISIT(traverse_module_state->__pyx_codeobj__71); - Py_VISIT(traverse_module_state->__pyx_codeobj__73); - Py_VISIT(traverse_module_state->__pyx_codeobj__76); - Py_VISIT(traverse_module_state->__pyx_codeobj__79); - Py_VISIT(traverse_module_state->__pyx_codeobj__81); - Py_VISIT(traverse_module_state->__pyx_codeobj__84); - Py_VISIT(traverse_module_state->__pyx_codeobj__87); - Py_VISIT(traverse_module_state->__pyx_codeobj__90); - Py_VISIT(traverse_module_state->__pyx_codeobj__92); - Py_VISIT(traverse_module_state->__pyx_codeobj__93); - Py_VISIT(traverse_module_state->__pyx_codeobj__95); - Py_VISIT(traverse_module_state->__pyx_codeobj__98); - Py_VISIT(traverse_module_state->__pyx_codeobj__100); - Py_VISIT(traverse_module_state->__pyx_codeobj__103); - Py_VISIT(traverse_module_state->__pyx_codeobj__106); - Py_VISIT(traverse_module_state->__pyx_codeobj__107); - Py_VISIT(traverse_module_state->__pyx_codeobj__109); - Py_VISIT(traverse_module_state->__pyx_codeobj__112); - Py_VISIT(traverse_module_state->__pyx_codeobj__114); - Py_VISIT(traverse_module_state->__pyx_codeobj__117); - Py_VISIT(traverse_module_state->__pyx_codeobj__120); - Py_VISIT(traverse_module_state->__pyx_codeobj__122); - Py_VISIT(traverse_module_state->__pyx_codeobj__125); - return 0; -} -#endif -/* #### Code section: module_state_defines ### */ -#if CYTHON_USE_MODULE_STATE -#define __pyx_d __pyx_mstate_global->__pyx_d -#define __pyx_b __pyx_mstate_global->__pyx_b -#define __pyx_cython_runtime __pyx_mstate_global->__pyx_cython_runtime -#define __pyx_empty_tuple __pyx_mstate_global->__pyx_empty_tuple -#define __pyx_empty_bytes __pyx_mstate_global->__pyx_empty_bytes -#define __pyx_empty_unicode __pyx_mstate_global->__pyx_empty_unicode -#ifdef __Pyx_CyFunction_USED -#define __pyx_CyFunctionType __pyx_mstate_global->__pyx_CyFunctionType -#endif -#ifdef __Pyx_FusedFunction_USED -#define __pyx_FusedFunctionType __pyx_mstate_global->__pyx_FusedFunctionType -#endif -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr 
__pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr -#define __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr __pyx_mstate_global->__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr -#define __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr __pyx_mstate_global->__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr -#define __pyx_kp_u_0_0_0 __pyx_mstate_global->__pyx_kp_u_0_0_0 -#define __pyx_kp_u_0m __pyx_mstate_global->__pyx_kp_u_0m -#define __pyx_kp_u_11_5g __pyx_mstate_global->__pyx_kp_u_11_5g -#define __pyx_kp_u_1m __pyx_mstate_global->__pyx_kp_u_1m -#define __pyx_kp_u_30m __pyx_mstate_global->__pyx_kp_u_30m -#define __pyx_kp_u_31m __pyx_mstate_global->__pyx_kp_u_31m -#define __pyx_kp_u_32m __pyx_mstate_global->__pyx_kp_u_32m -#define __pyx_kp_u_33m __pyx_mstate_global->__pyx_kp_u_33m -#define 
__pyx_kp_u_34m __pyx_mstate_global->__pyx_kp_u_34m -#define __pyx_kp_u_35m __pyx_mstate_global->__pyx_kp_u_35m -#define __pyx_kp_u_36m __pyx_mstate_global->__pyx_kp_u_36m -#define __pyx_kp_u_37m __pyx_mstate_global->__pyx_kp_u_37m -#define __pyx_kp_u_3_6_2 __pyx_mstate_global->__pyx_kp_u_3_6_2 -#define __pyx_kp_u_4m __pyx_mstate_global->__pyx_kp_u_4m -#define __pyx_kp_u_90m __pyx_mstate_global->__pyx_kp_u_90m -#define __pyx_kp_u_91m __pyx_mstate_global->__pyx_kp_u_91m -#define __pyx_kp_u_92m __pyx_mstate_global->__pyx_kp_u_92m -#define __pyx_kp_u_93m __pyx_mstate_global->__pyx_kp_u_93m -#define __pyx_kp_u_94m __pyx_mstate_global->__pyx_kp_u_94m -#define __pyx_kp_u_95m __pyx_mstate_global->__pyx_kp_u_95m -#define __pyx_kp_u_96m __pyx_mstate_global->__pyx_kp_u_96m -#define __pyx_kp_u_97m __pyx_mstate_global->__pyx_kp_u_97m -#define __pyx_kp_u_AppData_Roaming __pyx_mstate_global->__pyx_kp_u_AppData_Roaming -#define __pyx_n_s_AssertionError __pyx_mstate_global->__pyx_n_s_AssertionError -#define __pyx_n_u_Darwin __pyx_mstate_global->__pyx_n_u_Darwin -#define __pyx_n_s_FILE __pyx_mstate_global->__pyx_n_s_FILE -#define __pyx_n_s_INFO __pyx_mstate_global->__pyx_n_s_INFO -#define __pyx_kp_u_Invalid_Confidence_threshold __pyx_mstate_global->__pyx_kp_u_Invalid_Confidence_threshold -#define __pyx_kp_u_Invalid_IoU __pyx_mstate_global->__pyx_kp_u_Invalid_IoU -#define __pyx_n_s_LOGGER __pyx_mstate_global->__pyx_n_s_LOGGER -#define __pyx_kp_u_Library_Application_Support __pyx_mstate_global->__pyx_kp_u_Library_Application_Support -#define __pyx_n_u_Linux __pyx_mstate_global->__pyx_n_u_Linux -#define __pyx_n_s_NCOLS __pyx_mstate_global->__pyx_n_s_NCOLS -#define __pyx_n_u_NUMEXPR_MAX_THREADS __pyx_mstate_global->__pyx_n_u_NUMEXPR_MAX_THREADS -#define __pyx_n_s_NUM_THREADS __pyx_mstate_global->__pyx_n_s_NUM_THREADS -#define __pyx_n_s_OSError __pyx_mstate_global->__pyx_n_s_OSError -#define __pyx_n_s_Path __pyx_mstate_global->__pyx_n_s_Path -#define __pyx_kp_u_Python __pyx_mstate_global->__pyx_kp_u_Python -#define __pyx_n_u_RANK __pyx_mstate_global->__pyx_n_u_RANK -#define __pyx_n_s_ROOT __pyx_mstate_global->__pyx_n_s_ROOT -#define __pyx_n_s_R_OK __pyx_mstate_global->__pyx_n_s_R_OK -#define __pyx_n_s_T __pyx_mstate_global->__pyx_n_s_T -#define __pyx_n_s_Tensor __pyx_mstate_global->__pyx_n_s_Tensor -#define __pyx_n_u_Ultralytics __pyx_mstate_global->__pyx_n_u_Ultralytics -#define __pyx_n_s_VERBOSE __pyx_mstate_global->__pyx_n_s_VERBOSE -#define __pyx_n_s_WARNING __pyx_mstate_global->__pyx_n_s_WARNING -#define __pyx_kp_u_WARNING_NMS_time_limit __pyx_mstate_global->__pyx_kp_u_WARNING_NMS_time_limit -#define __pyx_kp_u_WARNING_img_size __pyx_mstate_global->__pyx_kp_u_WARNING_img_size -#define __pyx_n_u_Windows __pyx_mstate_global->__pyx_n_u_Windows -#define __pyx_n_u_YOLOV5_CONFIG_DIR __pyx_mstate_global->__pyx_n_u_YOLOV5_CONFIG_DIR -#define __pyx_n_u_YOLOv5_VERBOSE __pyx_mstate_global->__pyx_n_u_YOLOv5_VERBOSE -#define __pyx_kp_u__10 __pyx_mstate_global->__pyx_kp_u__10 -#define __pyx_kp_u__11 __pyx_mstate_global->__pyx_kp_u__11 -#define __pyx_n_u__14 __pyx_mstate_global->__pyx_n_u__14 -#define __pyx_kp_u__15 __pyx_mstate_global->__pyx_kp_u__15 -#define __pyx_kp_u__16 __pyx_mstate_global->__pyx_kp_u__16 -#define __pyx_kp_u__17 __pyx_mstate_global->__pyx_kp_u__17 -#define __pyx_kp_u__18 __pyx_mstate_global->__pyx_kp_u__18 -#define __pyx_n_s__20 __pyx_mstate_global->__pyx_n_s__20 -#define __pyx_kp_u__20 __pyx_mstate_global->__pyx_kp_u__20 -#define __pyx_kp_u__21 __pyx_mstate_global->__pyx_kp_u__21 -#define 
__pyx_n_u__22 __pyx_mstate_global->__pyx_n_u__22 -#define __pyx_n_u__4 __pyx_mstate_global->__pyx_n_u__4 -#define __pyx_kp_u__5 __pyx_mstate_global->__pyx_kp_u__5 -#define __pyx_kp_u__6 __pyx_mstate_global->__pyx_kp_u__6 -#define __pyx_kp_u__7 __pyx_mstate_global->__pyx_kp_u__7 -#define __pyx_n_s__9 __pyx_mstate_global->__pyx_n_s__9 -#define __pyx_kp_u__9 __pyx_mstate_global->__pyx_kp_u__9 -#define __pyx_n_s_access __pyx_mstate_global->__pyx_n_s_access -#define __pyx_n_s_agnostic __pyx_mstate_global->__pyx_n_s_agnostic -#define __pyx_n_s_any __pyx_mstate_global->__pyx_n_s_any -#define __pyx_n_s_args __pyx_mstate_global->__pyx_n_s_args -#define __pyx_n_s_argsort __pyx_mstate_global->__pyx_n_s_argsort -#define __pyx_n_s_array __pyx_mstate_global->__pyx_n_s_array -#define __pyx_n_s_as_tuple __pyx_mstate_global->__pyx_n_s_as_tuple -#define __pyx_n_u_ascii __pyx_mstate_global->__pyx_n_u_ascii -#define __pyx_n_s_astype __pyx_mstate_global->__pyx_n_s_astype -#define __pyx_n_s_asyncio_coroutines __pyx_mstate_global->__pyx_n_s_asyncio_coroutines -#define __pyx_n_s_backends __pyx_mstate_global->__pyx_n_s_backends -#define __pyx_n_s_basicConfig __pyx_mstate_global->__pyx_n_s_basicConfig -#define __pyx_n_s_benchmark __pyx_mstate_global->__pyx_n_s_benchmark -#define __pyx_n_s_bincount __pyx_mstate_global->__pyx_n_s_bincount -#define __pyx_n_u_black __pyx_mstate_global->__pyx_n_u_black -#define __pyx_n_u_blue __pyx_mstate_global->__pyx_n_u_blue -#define __pyx_n_u_bold __pyx_mstate_global->__pyx_n_u_bold -#define __pyx_n_s_box __pyx_mstate_global->__pyx_n_s_box -#define __pyx_n_s_box_iou __pyx_mstate_global->__pyx_n_s_box_iou -#define __pyx_n_s_boxes __pyx_mstate_global->__pyx_n_s_boxes -#define __pyx_n_u_bright_black __pyx_mstate_global->__pyx_n_u_bright_black -#define __pyx_n_u_bright_blue __pyx_mstate_global->__pyx_n_u_bright_blue -#define __pyx_n_u_bright_cyan __pyx_mstate_global->__pyx_n_u_bright_cyan -#define __pyx_n_u_bright_green __pyx_mstate_global->__pyx_n_u_bright_green -#define __pyx_n_u_bright_magenta __pyx_mstate_global->__pyx_n_u_bright_magenta -#define __pyx_n_u_bright_red __pyx_mstate_global->__pyx_n_u_bright_red -#define __pyx_n_u_bright_white __pyx_mstate_global->__pyx_n_u_bright_white -#define __pyx_n_u_bright_yellow __pyx_mstate_global->__pyx_n_u_bright_yellow -#define __pyx_n_s_c __pyx_mstate_global->__pyx_n_s_c -#define __pyx_n_s_cat __pyx_mstate_global->__pyx_n_s_cat -#define __pyx_n_s_ceil __pyx_mstate_global->__pyx_n_s_ceil -#define __pyx_n_s_cfg __pyx_mstate_global->__pyx_n_s_cfg -#define __pyx_n_s_check_img_size __pyx_mstate_global->__pyx_n_s_check_img_size -#define __pyx_n_s_check_python __pyx_mstate_global->__pyx_n_s_check_python -#define __pyx_n_s_check_version __pyx_mstate_global->__pyx_n_s_check_version -#define __pyx_n_s_clamp __pyx_mstate_global->__pyx_n_s_clamp -#define __pyx_n_s_class_counts __pyx_mstate_global->__pyx_n_s_class_counts -#define __pyx_n_s_class_getitem __pyx_mstate_global->__pyx_n_s_class_getitem -#define __pyx_n_s_class_weights __pyx_mstate_global->__pyx_n_s_class_weights -#define __pyx_n_s_classes __pyx_mstate_global->__pyx_n_s_classes -#define __pyx_n_s_clean_str __pyx_mstate_global->__pyx_n_s_clean_str -#define __pyx_n_s_cline_in_traceback __pyx_mstate_global->__pyx_n_s_cline_in_traceback -#define __pyx_n_s_clip __pyx_mstate_global->__pyx_n_s_clip -#define __pyx_n_s_clip_coords __pyx_mstate_global->__pyx_n_s_clip_coords -#define __pyx_n_s_clone __pyx_mstate_global->__pyx_n_s_clone -#define __pyx_n_s_close __pyx_mstate_global->__pyx_n_s_close 
-#define __pyx_n_s_colors __pyx_mstate_global->__pyx_n_s_colors -#define __pyx_n_s_colorstr __pyx_mstate_global->__pyx_n_s_colorstr -#define __pyx_n_s_colorstr_locals_genexpr __pyx_mstate_global->__pyx_n_s_colorstr_locals_genexpr -#define __pyx_n_s_columns __pyx_mstate_global->__pyx_n_s_columns -#define __pyx_n_s_concatenate __pyx_mstate_global->__pyx_n_s_concatenate -#define __pyx_n_s_conf __pyx_mstate_global->__pyx_n_s_conf -#define __pyx_n_s_conf_thres __pyx_mstate_global->__pyx_n_s_conf_thres -#define __pyx_kp_u_config __pyx_mstate_global->__pyx_kp_u_config -#define __pyx_n_s_coords __pyx_mstate_global->__pyx_n_s_coords -#define __pyx_n_s_copy __pyx_mstate_global->__pyx_n_s_copy -#define __pyx_n_s_cos __pyx_mstate_global->__pyx_n_s_cos -#define __pyx_n_s_cpu_count __pyx_mstate_global->__pyx_n_s_cpu_count -#define __pyx_n_s_cudnn __pyx_mstate_global->__pyx_n_s_cudnn -#define __pyx_n_s_current __pyx_mstate_global->__pyx_n_s_current -#define __pyx_n_s_cv2 __pyx_mstate_global->__pyx_n_s_cv2 -#define __pyx_n_u_cyan __pyx_mstate_global->__pyx_n_u_cyan -#define __pyx_kp_u_d __pyx_mstate_global->__pyx_kp_u_d -#define __pyx_n_s_d_2 __pyx_mstate_global->__pyx_n_s_d_2 -#define __pyx_n_s_da __pyx_mstate_global->__pyx_n_s_da -#define __pyx_n_s_db __pyx_mstate_global->__pyx_n_s_db -#define __pyx_n_s_decode __pyx_mstate_global->__pyx_n_s_decode -#define __pyx_n_s_descending __pyx_mstate_global->__pyx_n_s_descending -#define __pyx_n_s_deterministic __pyx_mstate_global->__pyx_n_s_deterministic -#define __pyx_n_s_device __pyx_mstate_global->__pyx_n_s_device -#define __pyx_n_s_dir __pyx_mstate_global->__pyx_n_s_dir -#define __pyx_n_s_dirs __pyx_mstate_global->__pyx_n_s_dirs -#define __pyx_kp_u_disable __pyx_mstate_global->__pyx_kp_u_disable -#define __pyx_n_s_display __pyx_mstate_global->__pyx_n_s_display -#define __pyx_n_s_divisor __pyx_mstate_global->__pyx_n_s_divisor -#define __pyx_n_s_e __pyx_mstate_global->__pyx_n_s_e -#define __pyx_n_s_emojis __pyx_mstate_global->__pyx_n_s_emojis -#define __pyx_kp_u_enable __pyx_mstate_global->__pyx_kp_u_enable -#define __pyx_n_s_encode __pyx_mstate_global->__pyx_n_s_encode -#define __pyx_n_u_end __pyx_mstate_global->__pyx_n_u_end -#define __pyx_n_s_enter __pyx_mstate_global->__pyx_n_s_enter -#define __pyx_n_s_enumerate __pyx_mstate_global->__pyx_n_s_enumerate -#define __pyx_n_s_env __pyx_mstate_global->__pyx_n_s_env -#define __pyx_n_s_env_var __pyx_mstate_global->__pyx_n_s_env_var -#define __pyx_n_s_environ __pyx_mstate_global->__pyx_n_s_environ -#define __pyx_n_s_eps __pyx_mstate_global->__pyx_n_s_eps -#define __pyx_n_s_exclude __pyx_mstate_global->__pyx_n_s_exclude -#define __pyx_n_s_exist_ok __pyx_mstate_global->__pyx_n_s_exist_ok -#define __pyx_n_s_exists __pyx_mstate_global->__pyx_n_s_exists -#define __pyx_n_s_exit __pyx_mstate_global->__pyx_n_s_exit -#define __pyx_n_s_f __pyx_mstate_global->__pyx_n_s_f -#define __pyx_n_s_file __pyx_mstate_global->__pyx_n_s_file -#define __pyx_n_s_file_2 __pyx_mstate_global->__pyx_n_s_file_2 -#define __pyx_n_s_file_size __pyx_mstate_global->__pyx_n_s_file_size -#define __pyx_n_s_file_size_locals_genexpr __pyx_mstate_global->__pyx_n_s_file_size_locals_genexpr -#define __pyx_n_s_fitness __pyx_mstate_global->__pyx_n_s_fitness -#define __pyx_n_s_float __pyx_mstate_global->__pyx_n_s_float -#define __pyx_n_u_float_kind __pyx_mstate_global->__pyx_n_u_float_kind -#define __pyx_n_s_floor __pyx_mstate_global->__pyx_n_s_floor -#define __pyx_n_s_format __pyx_mstate_global->__pyx_n_s_format -#define __pyx_n_s_formatter 
__pyx_mstate_global->__pyx_n_s_formatter -#define __pyx_n_s_from_numpy __pyx_mstate_global->__pyx_n_s_from_numpy -#define __pyx_n_s_func __pyx_mstate_global->__pyx_n_s_func -#define __pyx_n_s_gain __pyx_mstate_global->__pyx_n_s_gain -#define __pyx_kp_u_gc __pyx_mstate_global->__pyx_kp_u_gc -#define __pyx_n_s_genexpr __pyx_mstate_global->__pyx_n_s_genexpr -#define __pyx_n_s_get __pyx_mstate_global->__pyx_n_s_get -#define __pyx_n_s_getLogger __pyx_mstate_global->__pyx_n_s_getLogger -#define __pyx_n_s_get_latest_run __pyx_mstate_global->__pyx_n_s_get_latest_run -#define __pyx_n_s_get_terminal_size __pyx_mstate_global->__pyx_n_s_get_terminal_size -#define __pyx_n_s_getctime __pyx_mstate_global->__pyx_n_s_getctime -#define __pyx_n_s_getenv __pyx_mstate_global->__pyx_n_s_getenv -#define __pyx_n_s_glob __pyx_mstate_global->__pyx_n_s_glob -#define __pyx_n_u_green __pyx_mstate_global->__pyx_n_u_green -#define __pyx_n_s_groups __pyx_mstate_global->__pyx_n_s_groups -#define __pyx_n_s_h __pyx_mstate_global->__pyx_n_s_h -#define __pyx_n_s_handler __pyx_mstate_global->__pyx_n_s_handler -#define __pyx_n_s_hard __pyx_mstate_global->__pyx_n_s_hard -#define __pyx_n_s_home __pyx_mstate_global->__pyx_n_s_home -#define __pyx_n_s_i __pyx_mstate_global->__pyx_n_s_i -#define __pyx_n_u_ignore __pyx_mstate_global->__pyx_n_u_ignore -#define __pyx_n_s_image_weights __pyx_mstate_global->__pyx_n_s_image_weights -#define __pyx_n_s_img0_shape __pyx_mstate_global->__pyx_n_s_img0_shape -#define __pyx_n_s_img1_shape __pyx_mstate_global->__pyx_n_s_img1_shape -#define __pyx_n_s_imgsz __pyx_mstate_global->__pyx_n_s_imgsz -#define __pyx_n_s_import __pyx_mstate_global->__pyx_n_s_import -#define __pyx_n_s_increment_path __pyx_mstate_global->__pyx_n_s_increment_path -#define __pyx_n_s_info __pyx_mstate_global->__pyx_n_s_info -#define __pyx_n_s_init_seeds __pyx_mstate_global->__pyx_n_s_init_seeds -#define __pyx_n_s_initializing __pyx_mstate_global->__pyx_n_s_initializing -#define __pyx_n_s_input __pyx_mstate_global->__pyx_n_s_input -#define __pyx_n_s_instance __pyx_mstate_global->__pyx_n_s_instance -#define __pyx_n_s_int __pyx_mstate_global->__pyx_n_s_int -#define __pyx_n_s_intersect_dicts __pyx_mstate_global->__pyx_n_s_intersect_dicts -#define __pyx_n_s_intersect_dicts_locals_genexpr __pyx_mstate_global->__pyx_n_s_intersect_dicts_locals_genexpr -#define __pyx_n_s_iou __pyx_mstate_global->__pyx_n_s_iou -#define __pyx_n_s_iou_thres __pyx_mstate_global->__pyx_n_s_iou_thres -#define __pyx_n_s_is_ascii __pyx_mstate_global->__pyx_n_s_is_ascii -#define __pyx_n_s_is_chinese __pyx_mstate_global->__pyx_n_s_is_chinese -#define __pyx_n_s_is_coroutine __pyx_mstate_global->__pyx_n_s_is_coroutine -#define __pyx_n_s_is_dir __pyx_mstate_global->__pyx_n_s_is_dir -#define __pyx_n_s_is_file __pyx_mstate_global->__pyx_n_s_is_file -#define __pyx_n_s_is_writeable __pyx_mstate_global->__pyx_n_s_is_writeable -#define __pyx_kp_u_isenabled __pyx_mstate_global->__pyx_kp_u_isenabled -#define __pyx_n_s_items __pyx_mstate_global->__pyx_n_s_items -#define __pyx_n_s_j __pyx_mstate_global->__pyx_n_s_j -#define __pyx_n_s_k __pyx_mstate_global->__pyx_n_s_k -#define __pyx_n_s_keepdim __pyx_mstate_global->__pyx_n_s_keepdim -#define __pyx_n_s_key __pyx_mstate_global->__pyx_n_s_key -#define __pyx_n_s_kwargs __pyx_mstate_global->__pyx_n_s_kwargs -#define __pyx_n_s_l __pyx_mstate_global->__pyx_n_s_l -#define __pyx_n_s_labels __pyx_mstate_global->__pyx_n_s_labels -#define __pyx_n_s_labels_to_class_weights __pyx_mstate_global->__pyx_n_s_labels_to_class_weights -#define 
__pyx_n_s_labels_to_image_weights __pyx_mstate_global->__pyx_n_s_labels_to_image_weights -#define __pyx_n_s_last_list __pyx_mstate_global->__pyx_n_s_last_list -#define __pyx_kp_u_last_pt __pyx_mstate_global->__pyx_kp_u_last_pt -#define __pyx_n_s_level __pyx_mstate_global->__pyx_n_s_level -#define __pyx_n_s_linewidth __pyx_mstate_global->__pyx_n_s_linewidth -#define __pyx_n_s_logging __pyx_mstate_global->__pyx_n_s_logging -#define __pyx_n_s_long __pyx_mstate_global->__pyx_n_s_long -#define __pyx_n_u_long __pyx_mstate_global->__pyx_n_u_long -#define __pyx_n_s_lower __pyx_mstate_global->__pyx_n_s_lower -#define __pyx_n_s_m __pyx_mstate_global->__pyx_n_s_m -#define __pyx_n_u_magenta __pyx_mstate_global->__pyx_n_u_magenta -#define __pyx_n_s_main __pyx_mstate_global->__pyx_n_s_main -#define __pyx_n_s_make_divisible __pyx_mstate_global->__pyx_n_s_make_divisible -#define __pyx_n_s_manual_seed __pyx_mstate_global->__pyx_n_s_manual_seed -#define __pyx_n_s_matches __pyx_mstate_global->__pyx_n_s_matches -#define __pyx_n_s_math __pyx_mstate_global->__pyx_n_s_math -#define __pyx_n_s_max __pyx_mstate_global->__pyx_n_s_max -#define __pyx_n_s_max_columns __pyx_mstate_global->__pyx_n_s_max_columns -#define __pyx_n_s_max_det __pyx_mstate_global->__pyx_n_s_max_det -#define __pyx_n_s_max_nms __pyx_mstate_global->__pyx_n_s_max_nms -#define __pyx_n_s_max_wh __pyx_mstate_global->__pyx_n_s_max_wh -#define __pyx_n_s_merge __pyx_mstate_global->__pyx_n_s_merge -#define __pyx_kp_u_message_s __pyx_mstate_global->__pyx_kp_u_message_s -#define __pyx_n_s_methods __pyx_mstate_global->__pyx_n_s_methods -#define __pyx_n_s_min_wh __pyx_mstate_global->__pyx_n_s_min_wh -#define __pyx_n_s_minimum __pyx_mstate_global->__pyx_n_s_minimum -#define __pyx_n_s_minlength __pyx_mstate_global->__pyx_n_s_minlength -#define __pyx_n_s_mkdir __pyx_mstate_global->__pyx_n_s_mkdir -#define __pyx_n_s_mm __pyx_mstate_global->__pyx_n_s_mm -#define __pyx_n_s_multi_label __pyx_mstate_global->__pyx_n_s_multi_label -#define __pyx_kp_u_must_be_multiple_of_max_stride __pyx_mstate_global->__pyx_kp_u_must_be_multiple_of_max_stride -#define __pyx_n_s_n __pyx_mstate_global->__pyx_n_s_n -#define __pyx_n_s_name __pyx_mstate_global->__pyx_n_s_name -#define __pyx_n_s_name_2 __pyx_mstate_global->__pyx_n_s_name_2 -#define __pyx_n_s_nc __pyx_mstate_global->__pyx_n_s_nc -#define __pyx_n_s_new_size __pyx_mstate_global->__pyx_n_s_new_size -#define __pyx_n_s_nms __pyx_mstate_global->__pyx_n_s_nms -#define __pyx_n_s_non_max_suppression __pyx_mstate_global->__pyx_n_s_non_max_suppression -#define __pyx_n_s_nonzero __pyx_mstate_global->__pyx_n_s_nonzero -#define __pyx_n_s_np __pyx_mstate_global->__pyx_n_s_np -#define __pyx_n_s_numpy __pyx_mstate_global->__pyx_n_s_numpy -#define __pyx_n_s_one_cycle __pyx_mstate_global->__pyx_n_s_one_cycle -#define __pyx_n_s_one_cycle_locals_lambda __pyx_mstate_global->__pyx_n_s_one_cycle_locals_lambda -#define __pyx_n_s_ones __pyx_mstate_global->__pyx_n_s_ones -#define __pyx_n_s_open __pyx_mstate_global->__pyx_n_s_open -#define __pyx_n_s_ops __pyx_mstate_global->__pyx_n_s_ops -#define __pyx_n_s_opt __pyx_mstate_global->__pyx_n_s_opt -#define __pyx_n_s_options __pyx_mstate_global->__pyx_n_s_options -#define __pyx_n_s_os __pyx_mstate_global->__pyx_n_s_os -#define __pyx_n_s_output __pyx_mstate_global->__pyx_n_s_output -#define __pyx_n_s_pad __pyx_mstate_global->__pyx_n_s_pad -#define __pyx_n_s_padh __pyx_mstate_global->__pyx_n_s_padh -#define __pyx_n_s_padw __pyx_mstate_global->__pyx_n_s_padw -#define __pyx_n_s_pandas 
__pyx_mstate_global->__pyx_n_s_pandas -#define __pyx_n_s_parents __pyx_mstate_global->__pyx_n_s_parents -#define __pyx_n_s_parse __pyx_mstate_global->__pyx_n_s_parse -#define __pyx_n_s_path __pyx_mstate_global->__pyx_n_s_path -#define __pyx_n_s_pathlib __pyx_mstate_global->__pyx_n_s_pathlib -#define __pyx_n_s_pattern __pyx_mstate_global->__pyx_n_s_pattern -#define __pyx_n_s_pd __pyx_mstate_global->__pyx_n_s_pd -#define __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils __pyx_mstate_global->__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils -#define __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2 __pyx_mstate_global->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2 -#define __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3 __pyx_mstate_global->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3 -#define __pyx_n_s_pi __pyx_mstate_global->__pyx_n_s_pi -#define __pyx_n_s_pinned __pyx_mstate_global->__pyx_n_s_pinned -#define __pyx_n_s_platform __pyx_mstate_global->__pyx_n_s_platform -#define __pyx_n_s_precision __pyx_mstate_global->__pyx_n_s_precision -#define __pyx_n_s_prediction __pyx_mstate_global->__pyx_n_s_prediction -#define __pyx_n_s_print __pyx_mstate_global->__pyx_n_s_print -#define __pyx_n_s_print_args __pyx_mstate_global->__pyx_n_s_print_args -#define __pyx_n_s_print_args_locals_genexpr __pyx_mstate_global->__pyx_n_s_print_args_locals_genexpr -#define __pyx_n_s_profile __pyx_mstate_global->__pyx_n_s_profile -#define __pyx_n_s_python_version __pyx_mstate_global->__pyx_n_s_python_version -#define __pyx_n_s_random __pyx_mstate_global->__pyx_n_s_random -#define __pyx_n_s_range __pyx_mstate_global->__pyx_n_s_range -#define __pyx_n_s_rank __pyx_mstate_global->__pyx_n_s_rank -#define __pyx_n_s_ratio_pad __pyx_mstate_global->__pyx_n_s_ratio_pad -#define __pyx_n_s_re __pyx_mstate_global->__pyx_n_s_re -#define __pyx_n_s_recursive __pyx_mstate_global->__pyx_n_s_recursive -#define __pyx_n_u_red __pyx_mstate_global->__pyx_n_u_red -#define __pyx_n_s_redundant __pyx_mstate_global->__pyx_n_s_redundant -#define __pyx_n_s_repl __pyx_mstate_global->__pyx_n_s_repl -#define __pyx_n_s_replace __pyx_mstate_global->__pyx_n_s_replace -#define __pyx_n_s_reshape __pyx_mstate_global->__pyx_n_s_reshape -#define __pyx_n_s_resolve __pyx_mstate_global->__pyx_n_s_resolve -#define __pyx_n_s_s __pyx_mstate_global->__pyx_n_s_s -#define __pyx_kp_u_s_2 __pyx_mstate_global->__pyx_kp_u_s_2 -#define __pyx_kp_u_s_exceeded __pyx_mstate_global->__pyx_kp_u_s_exceeded -#define __pyx_n_s_scale_coords __pyx_mstate_global->__pyx_n_s_scale_coords -#define __pyx_n_s_scores __pyx_mstate_global->__pyx_n_s_scores -#define __pyx_n_s_search __pyx_mstate_global->__pyx_n_s_search -#define __pyx_n_s_search_dir __pyx_mstate_global->__pyx_n_s_search_dir -#define __pyx_n_s_seed __pyx_mstate_global->__pyx_n_s_seed -#define __pyx_n_s_send __pyx_mstate_global->__pyx_n_s_send -#define __pyx_n_s_sep __pyx_mstate_global->__pyx_n_s_sep -#define __pyx_n_s_setNumThreads __pyx_mstate_global->__pyx_n_s_setNumThreads -#define __pyx_n_s_set_logging __pyx_mstate_global->__pyx_n_s_set_logging -#define __pyx_n_s_set_printoptions __pyx_mstate_global->__pyx_n_s_set_printoptions -#define __pyx_n_s_shape __pyx_mstate_global->__pyx_n_s_shape -#define __pyx_n_s_shutil __pyx_mstate_global->__pyx_n_s_shutil -#define __pyx_n_s_spec __pyx_mstate_global->__pyx_n_s_spec -#define __pyx_n_s_split __pyx_mstate_global->__pyx_n_s_split -#define __pyx_n_s_st_size __pyx_mstate_global->__pyx_n_s_st_size -#define __pyx_n_s_startswith __pyx_mstate_global->__pyx_n_s_startswith -#define __pyx_n_s_stat 
__pyx_mstate_global->__pyx_n_s_stat -#define __pyx_n_s_stem __pyx_mstate_global->__pyx_n_s_stem -#define __pyx_n_s_steps __pyx_mstate_global->__pyx_n_s_steps -#define __pyx_n_s_str __pyx_mstate_global->__pyx_n_s_str -#define __pyx_n_s_string __pyx_mstate_global->__pyx_n_s_string -#define __pyx_n_s_sub __pyx_mstate_global->__pyx_n_s_sub -#define __pyx_n_s_suffix __pyx_mstate_global->__pyx_n_s_suffix -#define __pyx_n_s_sum __pyx_mstate_global->__pyx_n_s_sum -#define __pyx_n_s_system __pyx_mstate_global->__pyx_n_s_system -#define __pyx_n_s_t __pyx_mstate_global->__pyx_n_s_t -#define __pyx_n_s_tensor __pyx_mstate_global->__pyx_n_s_tensor -#define __pyx_n_s_test __pyx_mstate_global->__pyx_n_s_test -#define __pyx_n_s_test_2 __pyx_mstate_global->__pyx_n_s_test_2 -#define __pyx_n_s_throw __pyx_mstate_global->__pyx_n_s_throw -#define __pyx_n_s_time __pyx_mstate_global->__pyx_n_s_time -#define __pyx_n_s_time_limit __pyx_mstate_global->__pyx_n_s_time_limit -#define __pyx_kp_u_tmp __pyx_mstate_global->__pyx_kp_u_tmp -#define __pyx_kp_u_tmp_txt __pyx_mstate_global->__pyx_kp_u_tmp_txt -#define __pyx_n_s_torch __pyx_mstate_global->__pyx_n_s_torch -#define __pyx_n_s_torch_backends_cudnn __pyx_mstate_global->__pyx_n_s_torch_backends_cudnn -#define __pyx_n_s_torchvision __pyx_mstate_global->__pyx_n_s_torchvision -#define __pyx_n_u_true __pyx_mstate_global->__pyx_n_u_true -#define __pyx_n_s_try_except __pyx_mstate_global->__pyx_n_s_try_except -#define __pyx_n_s_try_except_locals_handler __pyx_mstate_global->__pyx_n_s_try_except_locals_handler -#define __pyx_n_u_underline __pyx_mstate_global->__pyx_n_u_underline -#define __pyx_n_s_unlink __pyx_mstate_global->__pyx_n_s_unlink -#define __pyx_n_s_unquote __pyx_mstate_global->__pyx_n_s_unquote -#define __pyx_kp_u_updating_to __pyx_mstate_global->__pyx_kp_u_updating_to -#define __pyx_n_s_url __pyx_mstate_global->__pyx_n_s_url -#define __pyx_n_s_url2file __pyx_mstate_global->__pyx_n_s_url2file -#define __pyx_n_s_urllib __pyx_mstate_global->__pyx_n_s_urllib -#define __pyx_n_s_user_config_dir __pyx_mstate_global->__pyx_n_s_user_config_dir -#define __pyx_n_s_v __pyx_mstate_global->__pyx_n_s_v -#define __pyx_kp_u_valid_values_are_between_0_0_an __pyx_mstate_global->__pyx_kp_u_valid_values_are_between_0_0_an -#define __pyx_n_s_vars __pyx_mstate_global->__pyx_n_s_vars -#define __pyx_n_s_verbose __pyx_mstate_global->__pyx_n_s_verbose -#define __pyx_kp_u_version __pyx_mstate_global->__pyx_kp_u_version -#define __pyx_n_s_view __pyx_mstate_global->__pyx_n_s_view -#define __pyx_n_s_w __pyx_mstate_global->__pyx_n_s_w -#define __pyx_n_u_w __pyx_mstate_global->__pyx_n_u_w -#define __pyx_n_s_warning __pyx_mstate_global->__pyx_n_s_warning -#define __pyx_n_s_weights __pyx_mstate_global->__pyx_n_s_weights -#define __pyx_n_u_white __pyx_mstate_global->__pyx_n_u_white -#define __pyx_n_s_with_suffix __pyx_mstate_global->__pyx_n_s_with_suffix -#define __pyx_n_s_x __pyx_mstate_global->__pyx_n_s_x -#define __pyx_n_s_xc __pyx_mstate_global->__pyx_n_s_xc -#define __pyx_n_s_xi __pyx_mstate_global->__pyx_n_s_xi -#define __pyx_n_s_xyn2xy __pyx_mstate_global->__pyx_n_s_xyn2xy -#define __pyx_n_s_xywh2xyxy __pyx_mstate_global->__pyx_n_s_xywh2xyxy -#define __pyx_n_s_xywhn2xyxy __pyx_mstate_global->__pyx_n_s_xywhn2xyxy -#define __pyx_n_s_xyxy2xywh __pyx_mstate_global->__pyx_n_s_xyxy2xywh -#define __pyx_n_s_xyxy2xywhn __pyx_mstate_global->__pyx_n_s_xyxy2xywhn -#define __pyx_n_s_y __pyx_mstate_global->__pyx_n_s_y -#define __pyx_n_s_y1 __pyx_mstate_global->__pyx_n_s_y1 -#define __pyx_n_s_y2 
__pyx_mstate_global->__pyx_n_s_y2 -#define __pyx_n_s_yaml __pyx_mstate_global->__pyx_n_s_yaml -#define __pyx_n_u_yellow __pyx_mstate_global->__pyx_n_u_yellow -#define __pyx_n_u_yolov5 __pyx_mstate_global->__pyx_n_u_yolov5 -#define __pyx_n_s_zeros __pyx_mstate_global->__pyx_n_s_zeros -#define __pyx_float_0_0 __pyx_mstate_global->__pyx_float_0_0 -#define __pyx_float_1_0 __pyx_mstate_global->__pyx_float_1_0 -#define __pyx_float_1E6 __pyx_mstate_global->__pyx_float_1E6 -#define __pyx_float_3E3 __pyx_mstate_global->__pyx_float_3E3 -#define __pyx_float_0_25 __pyx_mstate_global->__pyx_float_0_25 -#define __pyx_float_0_45 __pyx_mstate_global->__pyx_float_0_45 -#define __pyx_int_0 __pyx_mstate_global->__pyx_int_0 -#define __pyx_int_1 __pyx_mstate_global->__pyx_int_1 -#define __pyx_int_2 __pyx_mstate_global->__pyx_int_2 -#define __pyx_int_3 __pyx_mstate_global->__pyx_int_3 -#define __pyx_int_4 __pyx_mstate_global->__pyx_int_4 -#define __pyx_int_5 __pyx_mstate_global->__pyx_int_5 -#define __pyx_int_6 __pyx_mstate_global->__pyx_int_6 -#define __pyx_int_10 __pyx_mstate_global->__pyx_int_10 -#define __pyx_int_32 __pyx_mstate_global->__pyx_int_32 -#define __pyx_int_80 __pyx_mstate_global->__pyx_int_80 -#define __pyx_int_100 __pyx_mstate_global->__pyx_int_100 -#define __pyx_int_300 __pyx_mstate_global->__pyx_int_300 -#define __pyx_int_320 __pyx_mstate_global->__pyx_int_320 -#define __pyx_int_640 __pyx_mstate_global->__pyx_int_640 -#define __pyx_int_neg_1 __pyx_mstate_global->__pyx_int_neg_1 -#define __pyx_tuple_ __pyx_mstate_global->__pyx_tuple_ -#define __pyx_tuple__2 __pyx_mstate_global->__pyx_tuple__2 -#define __pyx_tuple__8 __pyx_mstate_global->__pyx_tuple__8 -#define __pyx_slice__23 __pyx_mstate_global->__pyx_slice__23 -#define __pyx_slice__28 __pyx_mstate_global->__pyx_slice__28 -#define __pyx_slice__33 __pyx_mstate_global->__pyx_slice__33 -#define __pyx_slice__36 __pyx_mstate_global->__pyx_slice__36 -#define __pyx_slice__38 __pyx_mstate_global->__pyx_slice__38 -#define __pyx_slice__42 __pyx_mstate_global->__pyx_slice__42 -#define __pyx_tuple__12 __pyx_mstate_global->__pyx_tuple__12 -#define __pyx_tuple__13 __pyx_mstate_global->__pyx_tuple__13 -#define __pyx_tuple__19 __pyx_mstate_global->__pyx_tuple__19 -#define __pyx_tuple__24 __pyx_mstate_global->__pyx_tuple__24 -#define __pyx_tuple__25 __pyx_mstate_global->__pyx_tuple__25 -#define __pyx_tuple__26 __pyx_mstate_global->__pyx_tuple__26 -#define __pyx_tuple__27 __pyx_mstate_global->__pyx_tuple__27 -#define __pyx_tuple__29 __pyx_mstate_global->__pyx_tuple__29 -#define __pyx_tuple__30 __pyx_mstate_global->__pyx_tuple__30 -#define __pyx_tuple__31 __pyx_mstate_global->__pyx_tuple__31 -#define __pyx_tuple__32 __pyx_mstate_global->__pyx_tuple__32 -#define __pyx_tuple__34 __pyx_mstate_global->__pyx_tuple__34 -#define __pyx_tuple__35 __pyx_mstate_global->__pyx_tuple__35 -#define __pyx_tuple__37 __pyx_mstate_global->__pyx_tuple__37 -#define __pyx_tuple__39 __pyx_mstate_global->__pyx_tuple__39 -#define __pyx_tuple__40 __pyx_mstate_global->__pyx_tuple__40 -#define __pyx_tuple__41 __pyx_mstate_global->__pyx_tuple__41 -#define __pyx_tuple__43 __pyx_mstate_global->__pyx_tuple__43 -#define __pyx_tuple__44 __pyx_mstate_global->__pyx_tuple__44 -#define __pyx_tuple__45 __pyx_mstate_global->__pyx_tuple__45 -#define __pyx_tuple__46 __pyx_mstate_global->__pyx_tuple__46 -#define __pyx_tuple__48 __pyx_mstate_global->__pyx_tuple__48 -#define __pyx_tuple__49 __pyx_mstate_global->__pyx_tuple__49 -#define __pyx_tuple__51 __pyx_mstate_global->__pyx_tuple__51 -#define 
__pyx_tuple__53 __pyx_mstate_global->__pyx_tuple__53 -#define __pyx_tuple__55 __pyx_mstate_global->__pyx_tuple__55 -#define __pyx_tuple__57 __pyx_mstate_global->__pyx_tuple__57 -#define __pyx_tuple__58 __pyx_mstate_global->__pyx_tuple__58 -#define __pyx_tuple__60 __pyx_mstate_global->__pyx_tuple__60 -#define __pyx_tuple__61 __pyx_mstate_global->__pyx_tuple__61 -#define __pyx_tuple__63 __pyx_mstate_global->__pyx_tuple__63 -#define __pyx_tuple__64 __pyx_mstate_global->__pyx_tuple__64 -#define __pyx_tuple__66 __pyx_mstate_global->__pyx_tuple__66 -#define __pyx_tuple__67 __pyx_mstate_global->__pyx_tuple__67 -#define __pyx_tuple__69 __pyx_mstate_global->__pyx_tuple__69 -#define __pyx_tuple__70 __pyx_mstate_global->__pyx_tuple__70 -#define __pyx_tuple__72 __pyx_mstate_global->__pyx_tuple__72 -#define __pyx_tuple__74 __pyx_mstate_global->__pyx_tuple__74 -#define __pyx_tuple__75 __pyx_mstate_global->__pyx_tuple__75 -#define __pyx_tuple__77 __pyx_mstate_global->__pyx_tuple__77 -#define __pyx_tuple__78 __pyx_mstate_global->__pyx_tuple__78 -#define __pyx_tuple__80 __pyx_mstate_global->__pyx_tuple__80 -#define __pyx_tuple__82 __pyx_mstate_global->__pyx_tuple__82 -#define __pyx_tuple__83 __pyx_mstate_global->__pyx_tuple__83 -#define __pyx_tuple__85 __pyx_mstate_global->__pyx_tuple__85 -#define __pyx_tuple__86 __pyx_mstate_global->__pyx_tuple__86 -#define __pyx_tuple__88 __pyx_mstate_global->__pyx_tuple__88 -#define __pyx_tuple__89 __pyx_mstate_global->__pyx_tuple__89 -#define __pyx_tuple__91 __pyx_mstate_global->__pyx_tuple__91 -#define __pyx_tuple__94 __pyx_mstate_global->__pyx_tuple__94 -#define __pyx_tuple__96 __pyx_mstate_global->__pyx_tuple__96 -#define __pyx_tuple__97 __pyx_mstate_global->__pyx_tuple__97 -#define __pyx_tuple__99 __pyx_mstate_global->__pyx_tuple__99 -#define __pyx_codeobj__3 __pyx_mstate_global->__pyx_codeobj__3 -#define __pyx_tuple__101 __pyx_mstate_global->__pyx_tuple__101 -#define __pyx_tuple__102 __pyx_mstate_global->__pyx_tuple__102 -#define __pyx_tuple__104 __pyx_mstate_global->__pyx_tuple__104 -#define __pyx_tuple__105 __pyx_mstate_global->__pyx_tuple__105 -#define __pyx_tuple__108 __pyx_mstate_global->__pyx_tuple__108 -#define __pyx_tuple__110 __pyx_mstate_global->__pyx_tuple__110 -#define __pyx_tuple__111 __pyx_mstate_global->__pyx_tuple__111 -#define __pyx_tuple__113 __pyx_mstate_global->__pyx_tuple__113 -#define __pyx_tuple__115 __pyx_mstate_global->__pyx_tuple__115 -#define __pyx_tuple__116 __pyx_mstate_global->__pyx_tuple__116 -#define __pyx_tuple__118 __pyx_mstate_global->__pyx_tuple__118 -#define __pyx_tuple__119 __pyx_mstate_global->__pyx_tuple__119 -#define __pyx_tuple__121 __pyx_mstate_global->__pyx_tuple__121 -#define __pyx_tuple__123 __pyx_mstate_global->__pyx_tuple__123 -#define __pyx_tuple__124 __pyx_mstate_global->__pyx_tuple__124 -#define __pyx_tuple__126 __pyx_mstate_global->__pyx_tuple__126 -#define __pyx_codeobj__47 __pyx_mstate_global->__pyx_codeobj__47 -#define __pyx_codeobj__50 __pyx_mstate_global->__pyx_codeobj__50 -#define __pyx_codeobj__52 __pyx_mstate_global->__pyx_codeobj__52 -#define __pyx_codeobj__54 __pyx_mstate_global->__pyx_codeobj__54 -#define __pyx_codeobj__56 __pyx_mstate_global->__pyx_codeobj__56 -#define __pyx_codeobj__59 __pyx_mstate_global->__pyx_codeobj__59 -#define __pyx_codeobj__62 __pyx_mstate_global->__pyx_codeobj__62 -#define __pyx_codeobj__65 __pyx_mstate_global->__pyx_codeobj__65 -#define __pyx_codeobj__68 __pyx_mstate_global->__pyx_codeobj__68 -#define __pyx_codeobj__71 __pyx_mstate_global->__pyx_codeobj__71 -#define 
__pyx_codeobj__73 __pyx_mstate_global->__pyx_codeobj__73 -#define __pyx_codeobj__76 __pyx_mstate_global->__pyx_codeobj__76 -#define __pyx_codeobj__79 __pyx_mstate_global->__pyx_codeobj__79 -#define __pyx_codeobj__81 __pyx_mstate_global->__pyx_codeobj__81 -#define __pyx_codeobj__84 __pyx_mstate_global->__pyx_codeobj__84 -#define __pyx_codeobj__87 __pyx_mstate_global->__pyx_codeobj__87 -#define __pyx_codeobj__90 __pyx_mstate_global->__pyx_codeobj__90 -#define __pyx_codeobj__92 __pyx_mstate_global->__pyx_codeobj__92 -#define __pyx_codeobj__93 __pyx_mstate_global->__pyx_codeobj__93 -#define __pyx_codeobj__95 __pyx_mstate_global->__pyx_codeobj__95 -#define __pyx_codeobj__98 __pyx_mstate_global->__pyx_codeobj__98 -#define __pyx_codeobj__100 __pyx_mstate_global->__pyx_codeobj__100 -#define __pyx_codeobj__103 __pyx_mstate_global->__pyx_codeobj__103 -#define __pyx_codeobj__106 __pyx_mstate_global->__pyx_codeobj__106 -#define __pyx_codeobj__107 __pyx_mstate_global->__pyx_codeobj__107 -#define __pyx_codeobj__109 __pyx_mstate_global->__pyx_codeobj__109 -#define __pyx_codeobj__112 __pyx_mstate_global->__pyx_codeobj__112 -#define __pyx_codeobj__114 __pyx_mstate_global->__pyx_codeobj__114 -#define __pyx_codeobj__117 __pyx_mstate_global->__pyx_codeobj__117 -#define __pyx_codeobj__120 __pyx_mstate_global->__pyx_codeobj__120 -#define __pyx_codeobj__122 __pyx_mstate_global->__pyx_codeobj__122 -#define __pyx_codeobj__125 __pyx_mstate_global->__pyx_codeobj__125 -#endif -/* #### Code section: module_code ### */ - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":40 - * - * - * def set_logging(name=None, verbose=VERBOSE): # <<<<<<<<<<<<<< - * # Sets level and returns logger - * rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_64__defaults__(CYTHON_UNUSED PyObject *__pyx_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__defaults__", 0); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_None)); - __Pyx_GIVEREF(((PyObject *)Py_None)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_None)); - __Pyx_INCREF(__Pyx_CyFunction_Defaults(__pyx_defaults, __pyx_self)->__pyx_arg_verbose); - __Pyx_GIVEREF(__Pyx_CyFunction_Defaults(__pyx_defaults, __pyx_self)->__pyx_arg_verbose); - PyTuple_SET_ITEM(__pyx_t_1, 1, __Pyx_CyFunction_Defaults(__pyx_defaults, __pyx_self)->__pyx_arg_verbose); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None); - __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.__defaults__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_1set_logging(PyObject *__pyx_self, -#if 
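/*
 * For orientation: the long #define run above is Cython's module-state
 * indirection -- every interned string (__pyx_n_s_* / __pyx_n_u_* /
 * __pyx_kp_u_*), numeric constant, tuple, slice, and code object is aliased
 * to a field of __pyx_mstate_global so the module works when
 * CYTHON_USE_MODULE_STATE is enabled. The __defaults__ helper above rebuilds
 * set_logging's default-argument tuple lazily; in Python terms it is roughly
 * (a sketch, with VERBOSE captured at function-definition time):
 *
 *     set_logging.__defaults__ == (None, VERBOSE)
 */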
CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_1set_logging = {"set_logging", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_1set_logging, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_1set_logging(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_name = 0; - PyObject *__pyx_v_verbose = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("set_logging (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,&__pyx_n_s_verbose,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,&__pyx_n_s_verbose,0}; - #endif - PyObject* values[2] = {0,0}; - __pyx_defaults *__pyx_dynamic_args = __Pyx_CyFunction_Defaults(__pyx_defaults, __pyx_self); - values[0] = ((PyObject *)((PyObject *)Py_None)); - values[1] = __pyx_dynamic_args->__pyx_arg_verbose; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_name); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_verbose); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "set_logging") < 0)) __PYX_ERR(0, 40, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_name = values[0]; - __pyx_v_verbose = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("set_logging", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 40, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.set_logging", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = 
__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_set_logging(__pyx_self, __pyx_v_name, __pyx_v_verbose); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_set_logging(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_name, PyObject *__pyx_v_verbose) { - PyObject *__pyx_v_rank = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("set_logging", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":42 - * def set_logging(name=None, verbose=VERBOSE): - * # Sets level and returns logger - * rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings # <<<<<<<<<<<<<< - * logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) - * return logging.getLogger(name) - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_os); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 42, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_getenv); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 42, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_rank = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":43 - * # Sets level and returns logger - * rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - * logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) # <<<<<<<<<<<<<< - * return logging.getLogger(name) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_logging); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_basicConfig); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_format, __pyx_kp_u_message_s) < 0) __PYX_ERR(0, 43, __pyx_L1_error) - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_verbose); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 43, __pyx_L1_error) - if (__pyx_t_5) { - } else { - __pyx_t_4 = __pyx_t_5; - goto __pyx_L3_bool_binop_done; - } - __Pyx_INCREF(__pyx_v_rank); - __pyx_t_6 = __pyx_v_rank; - __pyx_t_7 = __Pyx_PyInt_EqObjC(__pyx_t_6, __pyx_int_neg_1, -1L, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (!__pyx_t_8) { - } else { - __pyx_t_5 = __pyx_t_8; - goto 
__pyx_L5_bool_binop_done; - } - __pyx_t_7 = __Pyx_PyInt_EqObjC(__pyx_t_6, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_5 = __pyx_t_8; - __pyx_L5_bool_binop_done:; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_8 = (__pyx_t_5 != 0); - __pyx_t_4 = __pyx_t_8; - __pyx_L3_bool_binop_done:; - if (__pyx_t_4) { - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_logging); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_INFO); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_3 = __pyx_t_7; - __pyx_t_7 = 0; - } else { - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_logging); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_WARNING); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_3 = __pyx_t_6; - __pyx_t_6 = 0; - } - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_level, __pyx_t_3) < 0) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":44 - * rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - * logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) - * return logging.getLogger(name) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_logging); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_getLogger); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_name}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":40 - * - * - * def set_logging(name=None, verbose=VERBOSE): # <<<<<<<<<<<<<< - * # Sets level and returns logger - * rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - 
__Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.set_logging", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_rank); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":49 - * LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) - * - * def try_except(func): # <<<<<<<<<<<<<< - * # try-except function. Usage: @try_except decorator - * def handler(*args, **kwargs): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_3try_except(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_3try_except = {"try_except", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_3try_except, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_3try_except(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_func = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("try_except (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_func,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_func,0}; - #endif - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_func)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 49, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "try_except") < 0)) __PYX_ERR(0, 49, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_func = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("try_except", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 49, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.try_except", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = 
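/*
 * For reference, the generated set_logging machinery above compiles the
 * following Python, reassembled verbatim from the embedded source comments
 * for pdf_toolbox/lib/dia_yolov5/utils/general.py lines 40-44:
 *
 *     def set_logging(name=None, verbose=VERBOSE):
 *         # Sets level and returns logger
 *         rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
 *         logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
 *         return logging.getLogger(name)
 *
 * The two short-circuiting __Pyx_PyInt_EqObjC branches above implement the
 * `rank in (-1, 0)` membership test.
 */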
__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_2try_except(__pyx_self, __pyx_v_func); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":51 - * def try_except(func): - * # try-except function. Usage: @try_except decorator - * def handler(*args, **kwargs): # <<<<<<<<<<<<<< - * try: - * func(*args, **kwargs) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10try_except_1handler(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10try_except_1handler = {"handler", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10try_except_1handler, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10try_except_1handler(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_args = 0; - PyObject *__pyx_v_kwargs = 0; - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("handler (wrapper)", 0); - if (unlikely(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "handler", 1))) return NULL; - if (unlikely(__pyx_kwds)) { - __pyx_v_kwargs = __Pyx_KwargsAsDict_VARARGS(__pyx_kwds, __pyx_kwvalues); - if (unlikely(!__pyx_v_kwargs)) return NULL; - __Pyx_GOTREF(__pyx_v_kwargs); - } else { - __pyx_v_kwargs = PyDict_New(); - if (unlikely(!__pyx_v_kwargs)) return NULL; - __Pyx_GOTREF(__pyx_v_kwargs); - } - __Pyx_INCREF(__pyx_args); - __pyx_v_args = __pyx_args; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10try_except_handler(__pyx_self, __pyx_v_args, __pyx_v_kwargs); - - /* function exit code */ - __Pyx_DECREF(__pyx_v_args); - __Pyx_DECREF(__pyx_v_kwargs); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10try_except_handler(PyObject *__pyx_self, PyObject *__pyx_v_args, PyObject *__pyx_v_kwargs) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *__pyx_cur_scope; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *__pyx_outer_scope; - PyObject *__pyx_v_e = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - char const *__pyx_t_10; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - PyObject *__pyx_t_13 = NULL; - PyObject *__pyx_t_14 = NULL; - PyObject *__pyx_t_15 = NULL; - PyObject *__pyx_t_16 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("handler", 0); - __pyx_outer_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *) __Pyx_CyFunction_GetClosure(__pyx_self); - __pyx_cur_scope = __pyx_outer_scope; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":52 - * # try-except function. 
Usage: @try_except decorator - * def handler(*args, **kwargs): - * try: # <<<<<<<<<<<<<< - * func(*args, **kwargs) - * except Exception as e: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_1); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - /*try:*/ { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":53 - * def handler(*args, **kwargs): - * try: - * func(*args, **kwargs) # <<<<<<<<<<<<<< - * except Exception as e: - * print(e) - */ - if (unlikely(!__pyx_cur_scope->__pyx_v_func)) { __Pyx_RaiseClosureNameError("func"); __PYX_ERR(0, 53, __pyx_L3_error) } - __pyx_t_4 = PyDict_Copy(__pyx_v_kwargs); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 53, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_cur_scope->__pyx_v_func, __pyx_v_args, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 53, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":52 - * # try-except function. Usage: @try_except decorator - * def handler(*args, **kwargs): - * try: # <<<<<<<<<<<<<< - * func(*args, **kwargs) - * except Exception as e: - */ - } - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - goto __pyx_L8_try_end; - __pyx_L3_error:; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":54 - * try: - * func(*args, **kwargs) - * except Exception as e: # <<<<<<<<<<<<<< - * print(e) - * - */ - __pyx_t_6 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); - if (__pyx_t_6) { - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.try_except.handler", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_4, &__pyx_t_7) < 0) __PYX_ERR(0, 54, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GOTREF(__pyx_t_7); - __Pyx_INCREF(__pyx_t_4); - __pyx_v_e = __pyx_t_4; - /*try:*/ { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":55 - * func(*args, **kwargs) - * except Exception as e: - * print(e) # <<<<<<<<<<<<<< - * - * return handler - */ - __pyx_t_8 = __Pyx_PyObject_CallOneArg(__pyx_builtin_print, __pyx_v_e); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 55, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":54 - * try: - * func(*args, **kwargs) - * except Exception as e: # <<<<<<<<<<<<<< - * print(e) - * - */ - /*finally:*/ { - /*normal exit:*/{ - __Pyx_DECREF(__pyx_v_e); __pyx_v_e = 0; - goto __pyx_L15; - } - __pyx_L14_error:; - /*exception exit:*/{ - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_14, &__pyx_t_15, &__pyx_t_16); - if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13) < 0)) __Pyx_ErrFetch(&__pyx_t_11, &__pyx_t_12, &__pyx_t_13); - __Pyx_XGOTREF(__pyx_t_11); - __Pyx_XGOTREF(__pyx_t_12); - __Pyx_XGOTREF(__pyx_t_13); - __Pyx_XGOTREF(__pyx_t_14); - __Pyx_XGOTREF(__pyx_t_15); - __Pyx_XGOTREF(__pyx_t_16); - __pyx_t_6 = __pyx_lineno; __pyx_t_9 = 
__pyx_clineno; __pyx_t_10 = __pyx_filename; - { - __Pyx_DECREF(__pyx_v_e); __pyx_v_e = 0; - } - if (PY_MAJOR_VERSION >= 3) { - __Pyx_XGIVEREF(__pyx_t_14); - __Pyx_XGIVEREF(__pyx_t_15); - __Pyx_XGIVEREF(__pyx_t_16); - __Pyx_ExceptionReset(__pyx_t_14, __pyx_t_15, __pyx_t_16); - } - __Pyx_XGIVEREF(__pyx_t_11); - __Pyx_XGIVEREF(__pyx_t_12); - __Pyx_XGIVEREF(__pyx_t_13); - __Pyx_ErrRestore(__pyx_t_11, __pyx_t_12, __pyx_t_13); - __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; - __pyx_lineno = __pyx_t_6; __pyx_clineno = __pyx_t_9; __pyx_filename = __pyx_t_10; - goto __pyx_L5_except_error; - } - __pyx_L15:; - } - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - goto __pyx_L4_exception_handled; - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":52 - * # try-except function. Usage: @try_except decorator - * def handler(*args, **kwargs): - * try: # <<<<<<<<<<<<<< - * func(*args, **kwargs) - * except Exception as e: - */ - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - goto __pyx_L1_error; - __pyx_L4_exception_handled:; - __Pyx_XGIVEREF(__pyx_t_1); - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); - __pyx_L8_try_end:; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":51 - * def try_except(func): - * # try-except function. Usage: @try_except decorator - * def handler(*args, **kwargs): # <<<<<<<<<<<<<< - * try: - * func(*args, **kwargs) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.try_except.handler", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_e); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":49 - * LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) - * - * def try_except(func): # <<<<<<<<<<<<<< - * # try-except function. 
Usage: @try_except decorator - * def handler(*args, **kwargs): - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_2try_except(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *__pyx_cur_scope; - PyObject *__pyx_v_handler = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("try_except", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 49, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - __pyx_cur_scope->__pyx_v_func = __pyx_v_func; - __Pyx_INCREF(__pyx_cur_scope->__pyx_v_func); - __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_func); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":51 - * def try_except(func): - * # try-except function. Usage: @try_except decorator - * def handler(*args, **kwargs): # <<<<<<<<<<<<<< - * try: - * func(*args, **kwargs) - */ - __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10try_except_1handler, 0, __pyx_n_s_try_except_locals_handler, ((PyObject*)__pyx_cur_scope), __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_handler = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":57 - * print(e) - * - * return handler # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_handler); - __pyx_r = __pyx_v_handler; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":49 - * LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) - * - * def try_except(func): # <<<<<<<<<<<<<< - * # try-except function. 
Usage: @try_except decorator - * def handler(*args, **kwargs): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.try_except", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_handler); - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":60 - * - * - * def methods(instance): # <<<<<<<<<<<<<< - * # Get class/instance methods - * return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_5methods(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_5methods = {"methods", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_5methods, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_5methods(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_instance = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("methods (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_instance,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_instance,0}; - #endif - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_instance)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "methods") < 0)) __PYX_ERR(0, 60, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_instance = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("methods", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 60, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.methods", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = 
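/*
 * For reference, the try_except/handler pair above compiles the following
 * Python, reassembled from the embedded source comments (general.py lines
 * 49-57):
 *
 *     def try_except(func):
 *         # try-except function. Usage: @try_except decorator
 *         def handler(*args, **kwargs):
 *             try:
 *                 func(*args, **kwargs)
 *             except Exception as e:
 *                 print(e)
 *         return handler
 *
 * Note that handler discards func's return value and always returns None
 * (__pyx_r = Py_None in the generated exit path), so the decorator swallows
 * both exceptions and results.
 */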
__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_4methods(__pyx_self, __pyx_v_instance); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_4methods(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_instance) { - PyObject *__pyx_7genexpr__pyx_v_f = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t __pyx_t_4; - PyObject *(*__pyx_t_5)(PyObject *); - int __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - int __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("methods", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":62 - * def methods(instance): - * # Get class/instance methods - * return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - { /* enter inner scope */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_Dir(__pyx_v_instance); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_2); - if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { - __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __pyx_t_4 = 0; - __pyx_t_5 = NULL; - } else { - __pyx_t_4 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 62, __pyx_L5_error) - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - for (;;) { - if (likely(!__pyx_t_5)) { - if (likely(PyList_CheckExact(__pyx_t_3))) { - if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely((0 < 0))) __PYX_ERR(0, 62, __pyx_L5_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } else { - if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely((0 < 0))) __PYX_ERR(0, 62, __pyx_L5_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } - } else { - __pyx_t_2 = __pyx_t_5(__pyx_t_3); - if (unlikely(!__pyx_t_2)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 62, __pyx_L5_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_2); - } - __Pyx_XDECREF_SET(__pyx_7genexpr__pyx_v_f, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetAttr(__pyx_v_instance, __pyx_7genexpr__pyx_v_f); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = __Pyx_PyCallable_Check(__pyx_t_2); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 
0; - __pyx_t_8 = (__pyx_t_7 != 0); - if (__pyx_t_8) { - } else { - __pyx_t_6 = __pyx_t_8; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_7genexpr__pyx_v_f, __pyx_n_s_startswith); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_10 = NULL; - __pyx_t_11 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_9))) { - __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_9); - if (likely(__pyx_t_10)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); - __Pyx_INCREF(__pyx_t_10); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_9, function); - __pyx_t_11 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_n_u__4}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+1-__pyx_t_11, 1+__pyx_t_11); - __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } - __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 62, __pyx_L5_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_7 = ((!__pyx_t_8) != 0); - __pyx_t_6 = __pyx_t_7; - __pyx_L9_bool_binop_done:; - if (__pyx_t_6) { - if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_7genexpr__pyx_v_f))) __PYX_ERR(0, 62, __pyx_L5_error) - } - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_7genexpr__pyx_v_f); __pyx_7genexpr__pyx_v_f = 0; - goto __pyx_L11_exit_scope; - __pyx_L5_error:; - __Pyx_XDECREF(__pyx_7genexpr__pyx_v_f); __pyx_7genexpr__pyx_v_f = 0; - goto __pyx_L1_error; - __pyx_L11_exit_scope:; - } /* exit inner scope */ - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":60 - * - * - * def methods(instance): # <<<<<<<<<<<<<< - * # Get class/instance methods - * return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.methods", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_7genexpr__pyx_v_f); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":65 - * - * - * def print_args(name, opt): # <<<<<<<<<<<<<< - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_7print_args(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_7print_args = {"print_args", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_7print_args, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_7print_args(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { 
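For readability: the Python source that the surrounding generated C compiles is quoted piecewise in the file's own /* ... */ source comments (pdf_toolbox/lib/dia_yolov5/utils/general.py, lines 49-62). The two helpers whose compiled bodies end here reconstruct to the sketch below; the `except Exception as e:` line is inferred, since the comments quote only the `try:`, `func(*args, **kwargs)` and `print(e)` fragments around it.

    def try_except(func):
        # try-except function. Usage: @try_except decorator
        def handler(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except Exception as e:  # inferred; only print(e) appears in the quoted fragments
                print(e)
        return handler

    def methods(instance):
        # Get class/instance methods
        return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]

The one-line comprehension in methods() is what the C above expands into an explicit loop: PyObject_Dir() for dir(instance), __Pyx_PyCallable_Check() for callable(), a fast-call of each name's .startswith method for the dunder filter, and the short-circuit `and` realized by the __pyx_L9_bool_binop_done jump.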
- PyObject *__pyx_v_name = 0; - PyObject *__pyx_v_opt = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("print_args (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,&__pyx_n_s_opt,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,&__pyx_n_s_opt,0}; - #endif - PyObject* values[2] = {0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_name)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 65, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_opt)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 65, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("print_args", 1, 2, 2, 1); __PYX_ERR(0, 65, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "print_args") < 0)) __PYX_ERR(0, 65, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_name = values[0]; - __pyx_v_opt = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("print_args", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 65, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.print_args", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_6print_args(__pyx_self, __pyx_v_name, __pyx_v_opt); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10print_args_2generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":67 - * def print_args(name, opt): - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) # <<<<<<<<<<<<<< - * - * - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10print_args_genexpr(PyObject *__pyx_self) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *__pyx_cur_scope; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno 
= 0; - __Pyx_RefNannySetupContext("genexpr", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 67, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *) __pyx_self; - __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_outer_scope); - __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_outer_scope); - { - __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10print_args_2generator, NULL, (PyObject *) __pyx_cur_scope, __pyx_n_s_genexpr, __pyx_n_s_print_args_locals_genexpr, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2); if (unlikely(!gen)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_DECREF(__pyx_cur_scope); - __Pyx_RefNannyFinishContext(); - return (PyObject *) gen; - } - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.print_args.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10print_args_2generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ -{ - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *__pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *)__pyx_generator->closure); - PyObject *__pyx_r = NULL; - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - Py_ssize_t __pyx_t_8; - Py_UCS4 __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("genexpr", 0); - switch (__pyx_generator->resume_label) { - case 0: goto __pyx_L3_first_run; - default: /* CPython raises the right error here */ - __Pyx_RefNannyFinishContext(); - return NULL; - } - __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 67, __pyx_L1_error) - __pyx_r = PyList_New(0); if (unlikely(!__pyx_r)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_r); - __pyx_t_2 = 0; - if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_opt)) { __Pyx_RaiseClosureNameError("opt"); __PYX_ERR(0, 67, __pyx_L1_error) } - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_vars, __pyx_cur_scope->__pyx_outer_scope->__pyx_v_opt); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (unlikely(__pyx_t_5 == Py_None)) { - PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "items"); - __PYX_ERR(0, 67, __pyx_L1_error) - } - __pyx_t_6 = __Pyx_dict_iterator(__pyx_t_5, 0, 
__pyx_n_s_items, (&__pyx_t_3), (&__pyx_t_4)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_1); - __pyx_t_1 = __pyx_t_6; - __pyx_t_6 = 0; - while (1) { - __pyx_t_7 = __Pyx_dict_iter_next(__pyx_t_1, __pyx_t_3, &__pyx_t_2, &__pyx_t_6, &__pyx_t_5, NULL, __pyx_t_4); - if (unlikely(__pyx_t_7 == 0)) break; - if (unlikely(__pyx_t_7 == -1)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_5); - __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_k); - __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_k, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - __pyx_t_6 = 0; - __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_v); - __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_v, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_8 = 0; - __pyx_t_9 = 127; - __pyx_t_6 = __Pyx_PyObject_FormatSimple(__pyx_cur_scope->__pyx_v_k, __pyx_empty_unicode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_9 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_6) > __pyx_t_9) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_6) : __pyx_t_9; - __pyx_t_8 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); - __pyx_t_6 = 0; - __Pyx_INCREF(__pyx_kp_u__5); - __pyx_t_8 += 1; - __Pyx_GIVEREF(__pyx_kp_u__5); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_kp_u__5); - __pyx_t_6 = __Pyx_PyObject_FormatSimple(__pyx_cur_scope->__pyx_v_v, __pyx_empty_unicode); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_9 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_6) > __pyx_t_9) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_6) : __pyx_t_9; - __pyx_t_8 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_6); - __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyUnicode_Join(__pyx_t_5, 3, __pyx_t_8, __pyx_t_9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(__Pyx_ListComp_Append(__pyx_r, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_r); __pyx_r = 0; - __Pyx_Generator_Replace_StopIteration(0); - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - #if !CYTHON_USE_EXC_INFO_STACK - __Pyx_Coroutine_ResetAndClearException(__pyx_generator); - #endif - __pyx_generator->resume_label = -1; - __Pyx_Coroutine_clear((PyObject*)__pyx_generator); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":65 - * - * - * def print_args(name, opt): # <<<<<<<<<<<<<< - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_6print_args(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_name, PyObject *__pyx_v_opt) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *__pyx_cur_scope; - PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10print_args_2generator = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("print_args", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 65, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - __pyx_cur_scope->__pyx_v_opt = __pyx_v_opt; - __Pyx_INCREF(__pyx_cur_scope->__pyx_v_opt); - __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_opt); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":67 - * def print_args(name, opt): - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_LOGGER); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_info); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - 
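One lowering detail worth flagging: although general.py writes a generator expression inside ', '.join(...), the compiled closure above (__pyx_gb_..._10print_args_2generator) materializes the entire list in a single resume -- note __pyx_r = PyList_New(0) and the __Pyx_ListComp_Append calls -- and print_args drives it with one __Pyx_Generator_Next call before PyUnicode_Join (below). In plain Python terms the effect is:

    ', '.join([f'{k}={v}' for k, v in vars(opt).items()])

i.e. the generated code builds the list eagerly rather than yielding item by item.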
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_colorstr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_FormatSimple(__pyx_v_name, __pyx_empty_unicode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_kp_u__6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_6}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_t_4 = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10print_args_genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = __Pyx_Generator_Next(__pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyUnicode_Join(__pyx_kp_u__7, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyNumber_Add(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_6}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":65 - * - * - * def print_args(name, opt): # <<<<<<<<<<<<<< - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.print_args", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - 
__Pyx_XDECREF(__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10print_args_2generator); - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":70 - * - * - * def init_seeds(seed=0): # <<<<<<<<<<<<<< - * # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - * # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9init_seeds(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9init_seeds = {"init_seeds", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9init_seeds, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9init_seeds(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_seed = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("init_seeds (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_seed,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_seed,0}; - #endif - PyObject* values[1] = {0}; - values[0] = ((PyObject *)((PyObject *)__pyx_int_0)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_seed); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 70, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "init_seeds") < 0)) __PYX_ERR(0, 70, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_seed = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("init_seeds", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 70, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.init_seeds", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = 
__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8init_seeds(__pyx_self, __pyx_v_seed); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8init_seeds(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_seed) { - PyObject *__pyx_v_cudnn = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - __pyx_ctuple_int__and_int __pyx_t_5; - int __pyx_t_6; - __pyx_ctuple_int__and_int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("init_seeds", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":73 - * # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - * # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible - * import torch.backends.cudnn as cudnn # <<<<<<<<<<<<<< - * random.seed(seed) - * np.random.seed(seed) - */ - __pyx_t_1 = __Pyx_ImportDottedModule(__pyx_n_s_torch_backends_cudnn, __pyx_tuple__8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_cudnn = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":74 - * # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible - * import torch.backends.cudnn as cudnn - * random.seed(seed) # <<<<<<<<<<<<<< - * np.random.seed(seed) - * torch.manual_seed(seed) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_seed); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_seed}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":75 - * import torch.backends.cudnn as cudnn - * random.seed(seed) - * np.random.seed(seed) # <<<<<<<<<<<<<< - * torch.manual_seed(seed) - * cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 75, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_seed); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_4 = 
0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_seed}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 75, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":76 - * random.seed(seed) - * np.random.seed(seed) - * torch.manual_seed(seed) # <<<<<<<<<<<<<< - * cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_torch); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_manual_seed); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_seed}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":77 - * np.random.seed(seed) - * torch.manual_seed(seed) - * cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_v_seed, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_6) { - __pyx_t_7.f0 = 0; - __pyx_t_7.f1 = 1; - __pyx_t_5 = __pyx_t_7; - } else { - __pyx_t_7.f0 = 1; - __pyx_t_7.f1 = 0; - __pyx_t_5 = __pyx_t_7; - } - __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_t_5.f0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_t_5.f1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (__Pyx_PyObject_SetAttrStr(__pyx_v_cudnn, __pyx_n_s_benchmark, __pyx_t_1) < 0) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_v_cudnn, __pyx_n_s_deterministic, __pyx_t_2) < 0) __PYX_ERR(0, 77, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":70 - * - * - * def init_seeds(seed=0): # <<<<<<<<<<<<<< - * # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - * # cudnn seed 0 settings are 
slower and more reproducible, else faster and less reproducible - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.init_seeds", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_cudnn); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":80 - * - * - * def intersect_dicts(da, db, exclude=()): # <<<<<<<<<<<<<< - * # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_11intersect_dicts(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_11intersect_dicts = {"intersect_dicts", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_11intersect_dicts, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_11intersect_dicts(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_da = 0; - PyObject *__pyx_v_db = 0; - PyObject *__pyx_v_exclude = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("intersect_dicts (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_da,&__pyx_n_s_db,&__pyx_n_s_exclude,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_da,&__pyx_n_s_db,&__pyx_n_s_exclude,0}; - #endif - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)((PyObject*)__pyx_empty_tuple)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_da)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 80, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_db)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 80, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("intersect_dicts", 0, 2, 3, 1); 
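Again reconstructed from the quoted source comments (general.py lines 65-77), the two functions compiled above are sketched below; as quoted, they rely on general.py's module-level names (LOGGER, colorstr, random, np, torch):

    def print_args(name, opt):
        # Print argparser arguments
        LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))

    def init_seeds(seed=0):
        # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
        # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
        import torch.backends.cudnn as cudnn
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)

The (False, True)/(True, False) conditional compiles to the __pyx_ctuple_int__and_int C-tuple seen above, converted back to Python bools only for the two attribute stores on the cudnn module.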
__PYX_ERR(0, 80, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_exclude); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 80, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "intersect_dicts") < 0)) __PYX_ERR(0, 80, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_da = values[0]; - __pyx_v_db = values[1]; - __pyx_v_exclude = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("intersect_dicts", 0, 2, 3, __pyx_nargs); __PYX_ERR(0, 80, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.intersect_dicts", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10intersect_dicts(__pyx_self, __pyx_v_da, __pyx_v_db, __pyx_v_exclude); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15intersect_dicts_8genexpr2_2generator1(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":82 - * def intersect_dicts(da, db, exclude=()): - * # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} # <<<<<<<<<<<<<< - * - * - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15intersect_dicts_8genexpr2_genexpr(PyObject *__pyx_self) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *__pyx_cur_scope; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("genexpr", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 82, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *) __pyx_self; - __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_outer_scope); - __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_outer_scope); - { - __pyx_CoroutineObject *gen = 
__Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15intersect_dicts_8genexpr2_2generator1, NULL, (PyObject *) __pyx_cur_scope, __pyx_n_s_genexpr, __pyx_n_s_intersect_dicts_locals_genexpr, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2); if (unlikely(!gen)) __PYX_ERR(0, 82, __pyx_L1_error) - __Pyx_DECREF(__pyx_cur_scope); - __Pyx_RefNannyFinishContext(); - return (PyObject *) gen; - } - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.intersect_dicts.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15intersect_dicts_8genexpr2_2generator1(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ -{ - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *__pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *)__pyx_generator->closure); - PyObject *__pyx_r = NULL; - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *(*__pyx_t_3)(PyObject *); - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("genexpr", 0); - switch (__pyx_generator->resume_label) { - case 0: goto __pyx_L3_first_run; - default: /* CPython raises the right error here */ - __Pyx_RefNannyFinishContext(); - return NULL; - } - __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 82, __pyx_L1_error) - if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_exclude)) { __Pyx_RaiseClosureNameError("exclude"); __PYX_ERR(0, 82, __pyx_L1_error) } - if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_exclude)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_exclude)) { - __pyx_t_1 = __pyx_cur_scope->__pyx_outer_scope->__pyx_v_exclude; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - __pyx_t_3 = NULL; - } else { - __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_exclude); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 82, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 82, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_3)) { - if (likely(PyList_CheckExact(__pyx_t_1))) { - if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely((0 < 0))) __PYX_ERR(0, 82, __pyx_L1_error) - #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 82, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - } else { - if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_4); __pyx_t_2++; if (unlikely((0 < 0))) __PYX_ERR(0, 82, __pyx_L1_error) - #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 82, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - } - } else { - __pyx_t_4 = __pyx_t_3(__pyx_t_1); - if (unlikely(!__pyx_t_4)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 82, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_4); - } - __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_x); - __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_x, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - __pyx_t_4 = 0; - if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_8genexpr2__pyx_v_k)) { __Pyx_RaiseClosureNameError("k"); __PYX_ERR(0, 82, __pyx_L1_error) } - __pyx_t_5 = (__Pyx_PySequence_ContainsTF(__pyx_cur_scope->__pyx_v_x, __pyx_cur_scope->__pyx_outer_scope->__pyx_8genexpr2__pyx_v_k, Py_EQ)); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 82, __pyx_L1_error) - __pyx_t_6 = (__pyx_t_5 != 0); - if (__pyx_t_6) { - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_True); - __pyx_r = Py_True; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - } - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_False); - __pyx_r = Py_False; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - goto __pyx_L0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_Generator_Replace_StopIteration(0); - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - #if !CYTHON_USE_EXC_INFO_STACK - __Pyx_Coroutine_ResetAndClearException(__pyx_generator); - #endif - __pyx_generator->resume_label = -1; - __Pyx_Coroutine_clear((PyObject*)__pyx_generator); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":80 - * - * - * def intersect_dicts(da, db, exclude=()): # <<<<<<<<<<<<<< - * # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_10intersect_dicts(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_da, PyObject *__pyx_v_db, PyObject *__pyx_v_exclude) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *__pyx_cur_scope; - PyObject *__pyx_8genexpr2__pyx_v_v = NULL; - PyObject *__pyx_8genexpr2__pyx_v_0 = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t __pyx_t_3; - Py_ssize_t __pyx_t_4; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - int __pyx_t_9; - int __pyx_t_10; - int __pyx_t_11; - PyObject *__pyx_t_12 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("intersect_dicts", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct 
__pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 80, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - __pyx_cur_scope->__pyx_v_exclude = __pyx_v_exclude; - __Pyx_INCREF(__pyx_cur_scope->__pyx_v_exclude); - __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_exclude); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":82 - * def intersect_dicts(da, db, exclude=()): - * # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - { /* enter inner scope */ - __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = 0; - if (unlikely(__pyx_v_da == Py_None)) { - PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "items"); - __PYX_ERR(0, 82, __pyx_L5_error) - } - __pyx_t_6 = __Pyx_dict_iterator(__pyx_v_da, 0, __pyx_n_s_items, (&__pyx_t_4), (&__pyx_t_5)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_2); - __pyx_t_2 = __pyx_t_6; - __pyx_t_6 = 0; - while (1) { - __pyx_t_8 = __Pyx_dict_iter_next(__pyx_t_2, __pyx_t_4, &__pyx_t_3, &__pyx_t_6, &__pyx_t_7, NULL, __pyx_t_5); - if (unlikely(__pyx_t_8 == 0)) break; - if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GOTREF(__pyx_t_7); - __Pyx_XGOTREF(__pyx_cur_scope->__pyx_8genexpr2__pyx_v_k); - __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_8genexpr2__pyx_v_k, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_6); - __pyx_t_6 = 0; - __Pyx_XDECREF_SET(__pyx_8genexpr2__pyx_v_v, __pyx_t_7); - __pyx_t_7 = 0; - __pyx_t_10 = (__Pyx_PySequence_ContainsTF(__pyx_cur_scope->__pyx_8genexpr2__pyx_v_k, __pyx_v_db, Py_EQ)); if (unlikely((__pyx_t_10 < 0))) __PYX_ERR(0, 82, __pyx_L5_error) - __pyx_t_11 = (__pyx_t_10 != 0); - if (__pyx_t_11) { - } else { - __pyx_t_9 = __pyx_t_11; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_7 = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15intersect_dicts_8genexpr2_genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_6 = __Pyx_Generator_Next(__pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_11 < 0))) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_10 = ((!__pyx_t_11) != 0); - if (__pyx_t_10) { - } else { - __pyx_t_9 = __pyx_t_10; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_8genexpr2__pyx_v_v, __pyx_n_s_shape); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetItem(__pyx_v_db, __pyx_cur_scope->__pyx_8genexpr2__pyx_v_k); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_shape); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = PyObject_RichCompare(__pyx_t_6, __pyx_t_12, Py_EQ); __Pyx_XGOTREF(__pyx_t_7); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 82, 
__pyx_L5_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_7); if (unlikely((__pyx_t_10 < 0))) __PYX_ERR(0, 82, __pyx_L5_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_9 = __pyx_t_10; - __pyx_L9_bool_binop_done:; - if (__pyx_t_9) { - if (unlikely(PyDict_SetItem(__pyx_t_1, (PyObject*)__pyx_cur_scope->__pyx_8genexpr2__pyx_v_k, (PyObject*)__pyx_8genexpr2__pyx_v_v))) __PYX_ERR(0, 82, __pyx_L5_error) - } - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_v); __pyx_8genexpr2__pyx_v_v = 0; - goto __pyx_L12_exit_scope; - __pyx_L5_error:; - __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_v); __pyx_8genexpr2__pyx_v_v = 0; - goto __pyx_L1_error; - __pyx_L12_exit_scope:; - } /* exit inner scope */ - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":80 - * - * - * def intersect_dicts(da, db, exclude=()): # <<<<<<<<<<<<<< - * # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_12); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.intersect_dicts", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_v); - __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_0); - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":85 - * - * - * def get_latest_run(search_dir='.'): # <<<<<<<<<<<<<< - * # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) - * last_list = glob.glob(f'{search_dir}/[inserted by cython to avoid comment start]**[inserted by cython to avoid comment closer]/last*.pt', recursive=True) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_13get_latest_run(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_13get_latest_run = {"get_latest_run", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_13get_latest_run, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_13get_latest_run(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_search_dir = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("get_latest_run (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_search_dir,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_search_dir,0}; - #endif - PyObject* values[1] = {0}; - values[0] = ((PyObject *)((PyObject*)__pyx_kp_u__10)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_search_dir); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 85, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "get_latest_run") < 0)) __PYX_ERR(0, 85, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_search_dir = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("get_latest_run", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 85, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.get_latest_run", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_12get_latest_run(__pyx_self, __pyx_v_search_dir); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_12get_latest_run(CYTHON_UNUSED PyObject *__pyx_self, PyObject 
*__pyx_v_search_dir) { - PyObject *__pyx_v_last_list = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_latest_run", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":87 - * def get_latest_run(search_dir='.'): - * # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) - * last_list = glob.glob(f'{search_dir}/[inserted by cython to avoid comment start]**[inserted by cython to avoid comment closer]/last*.pt', recursive=True) # <<<<<<<<<<<<<< - * return max(last_list, key=os.path.getctime) if last_list else '' - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_glob); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_glob); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_v_search_dir, __pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_kp_u_last_pt); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_recursive, Py_True) < 0) __PYX_ERR(0, 87, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_last_list = __pyx_t_4; - __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":88 - * # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) - * last_list = glob.glob(f'{search_dir}/[inserted by cython to avoid comment start]**[inserted by cython to avoid comment closer]/last*.pt', recursive=True) - * return max(last_list, key=os.path.getctime) if last_list else '' # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_last_list); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 88, __pyx_L1_error) - if (__pyx_t_5) { - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_last_list); - __Pyx_GIVEREF(__pyx_v_last_list); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_last_list); - __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_os); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_path); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_getctime); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_key, __pyx_t_2) < 0) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_max, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_4 = __pyx_t_2; - __pyx_t_2 = 0; - } else { - __Pyx_INCREF(__pyx_kp_u__11); - __pyx_t_4 = __pyx_kp_u__11; - } - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":85 - * - * - * def get_latest_run(search_dir='.'): # <<<<<<<<<<<<<< - * # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) - * last_list = glob.glob(f'{search_dir}/[inserted by cython to avoid comment start]**[inserted by cython to avoid comment closer]/last*.pt', recursive=True) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.get_latest_run", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_last_list); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":91 - * - * - * def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): # <<<<<<<<<<<<<< - * # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
- * env = os.getenv(env_var) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15user_config_dir(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15user_config_dir = {"user_config_dir", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15user_config_dir, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15user_config_dir(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_dir = 0; - PyObject *__pyx_v_env_var = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("user_config_dir (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dir,&__pyx_n_s_env_var,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dir,&__pyx_n_s_env_var,0}; - #endif - PyObject* values[2] = {0,0}; - values[0] = ((PyObject *)((PyObject*)__pyx_n_u_Ultralytics)); - values[1] = ((PyObject *)((PyObject*)__pyx_n_u_YOLOV5_CONFIG_DIR)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_dir); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 91, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_env_var); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 91, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "user_config_dir") < 0)) __PYX_ERR(0, 91, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_dir = values[0]; - __pyx_v_env_var = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("user_config_dir", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 91, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.user_config_dir", __pyx_clineno, __pyx_lineno, __pyx_filename); - 
__Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_14user_config_dir(__pyx_self, __pyx_v_dir, __pyx_v_env_var); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_14user_config_dir(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dir, PyObject *__pyx_v_env_var) { - PyObject *__pyx_v_env = NULL; - PyObject *__pyx_v_path = NULL; - PyObject *__pyx_v_cfg = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("user_config_dir", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":93 - * def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): - * # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. - * env = os.getenv(env_var) # <<<<<<<<<<<<<< - * if env: - * path = Path(env) # use environment variable - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_os); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_getenv); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_env_var}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_v_env = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":94 - * # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
- * env = os.getenv(env_var) - * if env: # <<<<<<<<<<<<<< - * path = Path(env) # use environment variable - * else: - */ - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_env); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 94, __pyx_L1_error) - if (__pyx_t_5) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":95 - * env = os.getenv(env_var) - * if env: - * path = Path(env) # use environment variable # <<<<<<<<<<<<<< - * else: - * cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Path); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 95, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_env}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 95, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_v_path = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":94 - * # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. - * env = os.getenv(env_var) - * if env: # <<<<<<<<<<<<<< - * path = Path(env) # use environment variable - * else: - */ - goto __pyx_L3; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":97 - * path = Path(env) # use environment variable - * else: - * cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs # <<<<<<<<<<<<<< - * path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - * path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable - */ - /*else*/ { - __pyx_t_1 = __Pyx_PyDict_NewPresized(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_Windows, __pyx_kp_u_AppData_Roaming) < 0) __PYX_ERR(0, 97, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_Linux, __pyx_kp_u_config) < 0) __PYX_ERR(0, 97, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_Darwin, __pyx_kp_u_Library_Application_Support) < 0) __PYX_ERR(0, 97, __pyx_L1_error) - __pyx_v_cfg = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":98 - * else: - * cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs - * path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir # <<<<<<<<<<<<<< - * path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable - * path.mkdir(exist_ok=True) # make if required - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Path); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_home); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if 
(CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_platform); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_system); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __pyx_t_6 = __Pyx_PyDict_GetItemDefault(__pyx_v_cfg, __pyx_t_2, __pyx_kp_u__11); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_v_path = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":99 - * cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs - * path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - * path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable # <<<<<<<<<<<<<< - * path.mkdir(exist_ok=True) # make if required - * return path - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_is_writeable); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_path}; - __pyx_t_6 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 99, __pyx_L1_error) - 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (__pyx_t_5) { - __Pyx_INCREF(__pyx_v_path); - __pyx_t_2 = __pyx_v_path; - } else { - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_Path); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_kp_u_tmp}; - __pyx_t_6 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_t_2 = __pyx_t_6; - __pyx_t_6 = 0; - } - __pyx_t_6 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_v_dir); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_path, __pyx_t_6); - __pyx_t_6 = 0; - } - __pyx_L3:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":100 - * path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - * path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable - * path.mkdir(exist_ok=True) # make if required # <<<<<<<<<<<<<< - * return path - * - */ - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_path, __pyx_n_s_mkdir); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_exist_ok, Py_True) < 0) __PYX_ERR(0, 100, __pyx_L1_error) - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":101 - * path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable - * path.mkdir(exist_ok=True) # make if required - * return path # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_path); - __pyx_r = __pyx_v_path; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":91 - * - * - * def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): # <<<<<<<<<<<<<< - * # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
- * env = os.getenv(env_var) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.user_config_dir", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_env); - __Pyx_XDECREF(__pyx_v_path); - __Pyx_XDECREF(__pyx_v_cfg); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":104 - * - * - * def is_writeable(dir, test=False): # <<<<<<<<<<<<<< - * # Return True if directory has write permissions, test opening a file with write permissions if test=True - * if test: # method 1 - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_17is_writeable(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_17is_writeable = {"is_writeable", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_17is_writeable, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_17is_writeable(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_dir = 0; - PyObject *__pyx_v_test = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_writeable (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dir,&__pyx_n_s_test,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dir,&__pyx_n_s_test,0}; - #endif - PyObject* values[2] = {0,0}; - values[1] = ((PyObject *)((PyObject *)Py_False)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_dir)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 104, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_test); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 104, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "is_writeable") < 0)) __PYX_ERR(0, 104, __pyx_L3_error) - } - } else { - switch 
(__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_dir = values[0]; - __pyx_v_test = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("is_writeable", 0, 1, 2, __pyx_nargs); __PYX_ERR(0, 104, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.is_writeable", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_16is_writeable(__pyx_self, __pyx_v_dir, __pyx_v_test); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_16is_writeable(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dir, PyObject *__pyx_v_test) { - PyObject *__pyx_v_file = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - PyObject *__pyx_t_13 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_writeable", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":106 - * def is_writeable(dir, test=False): - * # Return True if directory has write permissions, test opening a file with write permissions if test=True - * if test: # method 1 # <<<<<<<<<<<<<< - * file = Path(dir) / 'tmp.txt' - * try: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_test); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 106, __pyx_L1_error) - if (__pyx_t_1) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":107 - * # Return True if directory has write permissions, test opening a file with write permissions if test=True - * if test: # method 1 - * file = Path(dir) / 'tmp.txt' # <<<<<<<<<<<<<< - * try: - * with open(file, 'w'): # open file with write permissions - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Path); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_dir}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_kp_u_tmp_txt); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_file = __pyx_t_3; - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":108 - * if test: # method 1 - * file = 
Path(dir) / 'tmp.txt' - * try: # <<<<<<<<<<<<<< - * with open(file, 'w'): # open file with write permissions - * pass - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_6); - __Pyx_XGOTREF(__pyx_t_7); - __Pyx_XGOTREF(__pyx_t_8); - /*try:*/ { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":109 - * file = Path(dir) / 'tmp.txt' - * try: - * with open(file, 'w'): # open file with write permissions # <<<<<<<<<<<<<< - * pass - * file.unlink() # remove file - */ - /*with:*/ { - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_file); - __Pyx_GIVEREF(__pyx_v_file); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_file); - __Pyx_INCREF(__pyx_n_u_w); - __Pyx_GIVEREF(__pyx_n_u_w); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_n_u_w); - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_open, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 109, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_9 = __Pyx_PyObject_LookupSpecial(__pyx_t_2, __pyx_n_s_exit); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 109, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_4 = __Pyx_PyObject_LookupSpecial(__pyx_t_2, __pyx_n_s_enter); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 109, __pyx_L10_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_10 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_10)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_10); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_10, }; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L10_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - /*try:*/ { - { - (void)__pyx_t_11; (void)__pyx_t_12; (void)__pyx_t_13; /* mark used */ - /*try:*/ { - } - } - } - /*finally:*/ { - /*normal exit:*/{ - if (__pyx_t_9) { - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_tuple__12, NULL); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 109, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_13); - __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - } - goto __pyx_L13; - } - __pyx_L13:; - } - goto __pyx_L20; - __pyx_L10_error:; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L4_error; - __pyx_L20:; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":111 - * with open(file, 'w'): # open file with write permissions - * pass - * file.unlink() # remove file # <<<<<<<<<<<<<< - * return True - * except OSError: - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_file, __pyx_n_s_unlink); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_4, 
}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":112 - * pass - * file.unlink() # remove file - * return True # <<<<<<<<<<<<<< - * except OSError: - * return False - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_True); - __pyx_r = Py_True; - goto __pyx_L8_try_return; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":108 - * if test: # method 1 - * file = Path(dir) / 'tmp.txt' - * try: # <<<<<<<<<<<<<< - * with open(file, 'w'): # open file with write permissions - * pass - */ - } - __pyx_L4_error:; - __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":113 - * file.unlink() # remove file - * return True - * except OSError: # <<<<<<<<<<<<<< - * return False - * else: # method 2 - */ - __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_OSError); - if (__pyx_t_5) { - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.is_writeable", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4) < 0) __PYX_ERR(0, 113, __pyx_L6_except_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GOTREF(__pyx_t_4); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":114 - * return True - * except OSError: - * return False # <<<<<<<<<<<<<< - * else: # method 2 - * return os.access(dir, os.R_OK) # possible issues on Windows - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_False); - __pyx_r = Py_False; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L7_except_return; - } - goto __pyx_L6_except_error; - __pyx_L6_except_error:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":108 - * if test: # method 1 - * file = Path(dir) / 'tmp.txt' - * try: # <<<<<<<<<<<<<< - * with open(file, 'w'): # open file with write permissions - * pass - */ - __Pyx_XGIVEREF(__pyx_t_6); - __Pyx_XGIVEREF(__pyx_t_7); - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8); - goto __pyx_L1_error; - __pyx_L8_try_return:; - __Pyx_XGIVEREF(__pyx_t_6); - __Pyx_XGIVEREF(__pyx_t_7); - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8); - goto __pyx_L0; - __pyx_L7_except_return:; - __Pyx_XGIVEREF(__pyx_t_6); - __Pyx_XGIVEREF(__pyx_t_7); - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_ExceptionReset(__pyx_t_6, __pyx_t_7, __pyx_t_8); - goto __pyx_L0; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":106 - * def is_writeable(dir, test=False): - * # Return True if directory has write permissions, test opening a file with write permissions if test=True - * if test: # method 1 # <<<<<<<<<<<<<< - * file = Path(dir) / 'tmp.txt' - * try: - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":116 - * return False - * else: # method 2 - * return os.access(dir, os.R_OK) # possible issues on Windows # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_os); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_access); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 116, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_os); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_R_OK); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 116, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_v_dir, __pyx_t_10}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 116, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":104 - * - * - * def is_writeable(dir, test=False): # <<<<<<<<<<<<<< - * # Return True if directory has write permissions, test opening a file with write permissions if test=True - * if test: # method 1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.is_writeable", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_file); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":119 - * - * - * def is_ascii(s=''): # <<<<<<<<<<<<<< - * # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - * s = str(s) # convert list, tuple, None, etc. 
to str - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_19is_ascii(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_19is_ascii = {"is_ascii", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_19is_ascii, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_19is_ascii(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_s = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_ascii (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_s,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_s,0}; - #endif - PyObject* values[1] = {0}; - values[0] = ((PyObject *)((PyObject*)__pyx_kp_u__11)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_s); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 119, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "is_ascii") < 0)) __PYX_ERR(0, 119, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_s = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("is_ascii", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 119, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.is_ascii", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_18is_ascii(__pyx_self, __pyx_v_s); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_18is_ascii(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_s) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int 
__pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_ascii", 0); - __Pyx_INCREF(__pyx_v_s); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":121 - * def is_ascii(s=''): - * # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - * s = str(s) # convert list, tuple, None, etc. to str # <<<<<<<<<<<<<< - * return len(s.encode().decode('ascii', 'ignore')) == len(s) - * - */ - __pyx_t_1 = __Pyx_PyObject_Str(__pyx_v_s); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF_SET(__pyx_v_s, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":122 - * # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - * s = str(s) # convert list, tuple, None, etc. to str - * return len(s.encode().decode('ascii', 'ignore')) == len(s) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_s, __pyx_n_s_encode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 122, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 122, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_decode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 122, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 122, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = PyObject_Length(__pyx_t_1); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 122, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_6 = PyObject_Length(__pyx_v_s); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 122, __pyx_L1_error) - __pyx_t_1 = __Pyx_PyBool_FromLong((__pyx_t_5 == __pyx_t_6)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 122, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":119 - * - * - * def is_ascii(s=''): # <<<<<<<<<<<<<< - * # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - * s = str(s) # convert list, tuple, None, etc. to str - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.is_ascii", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_s); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":125 - * - * - * def is_chinese(s=''): # <<<<<<<<<<<<<< - * # Is string composed of any Chinese characters? 
- * return re.search('[\u4e00-\u9fff]', s) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_21is_chinese(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_21is_chinese = {"is_chinese", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_21is_chinese, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_21is_chinese(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_s = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_chinese (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_s,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_s,0}; - #endif - PyObject* values[1] = {0}; - values[0] = ((PyObject *)((PyObject*)__pyx_n_u__14)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_s); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 125, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "is_chinese") < 0)) __PYX_ERR(0, 125, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_s = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("is_chinese", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 125, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.is_chinese", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_20is_chinese(__pyx_self, __pyx_v_s); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_20is_chinese(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_s) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int 
__pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_chinese", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":127 - * def is_chinese(s=''): - * # Is string composed of any Chinese characters? - * return re.search('[\u4e00-\u9fff]', s) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_re); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_search); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_kp_u__15, __pyx_v_s}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":125 - * - * - * def is_chinese(s=''): # <<<<<<<<<<<<<< - * # Is string composed of any Chinese characters? - * return re.search('[\u4e00-\u9fff]', s) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.is_chinese", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":130 - * - * - * def emojis(str=''): # <<<<<<<<<<<<<< - * # Return platform-dependent emoji-safe version of string - * return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_23emojis(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_23emojis = {"emojis", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_23emojis, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_23emojis(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_str = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("emojis (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - 
PyObject **__pyx_pyargnames[] = {&__pyx_n_s_str,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_str,0}; - #endif - PyObject* values[1] = {0}; - values[0] = ((PyObject *)((PyObject*)__pyx_kp_u__11)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_str); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 130, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "emojis") < 0)) __PYX_ERR(0, 130, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_str = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("emojis", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 130, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.emojis", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_22emojis(__pyx_self, __pyx_v_str); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_22emojis(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_str) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("emojis", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":132 - * def emojis(str=''): - * # Return platform-dependent emoji-safe version of string - * return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_platform); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 132, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_system); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 132, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 132, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_t_2, __pyx_n_u_Windows, Py_EQ)); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(0, 132, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_6) { - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_str, __pyx_n_s_encode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 132, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 132, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_decode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 132, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 132, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_1 = __pyx_t_2; - __pyx_t_2 = 0; - } else { - __Pyx_INCREF(__pyx_v_str); - __pyx_t_1 = __pyx_v_str; - } - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":130 - * - * - * def emojis(str=''): # <<<<<<<<<<<<<< - * # Return platform-dependent emoji-safe version of string - * return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.emojis", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":135 - * - * - * def file_size(path): # <<<<<<<<<<<<<< - * # Return file/dir size (MB) - * path = Path(path) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_25file_size(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_25file_size = {"file_size", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_25file_size, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_25file_size(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_path = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, 
__pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("file_size (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_path,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_path,0}; - #endif - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_path)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 135, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "file_size") < 0)) __PYX_ERR(0, 135, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_path = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("file_size", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 135, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.file_size", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_24file_size(__pyx_self, __pyx_v_path); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9file_size_2generator2(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":141 - * return path.stat().st_size / 1E6 - * elif path.is_dir(): - * return sum(f.stat().st_size for f in path.glob('**[inserted by cython to avoid comment closer]/[inserted by cython to avoid comment start]*') if f.is_file()) / 1E6 # <<<<<<<<<<<<<< - * else: - * return 0.0 - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9file_size_genexpr(PyObject *__pyx_self) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *__pyx_cur_scope; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("genexpr", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 141, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - 
__pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *) __pyx_self; - __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_outer_scope); - __Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_outer_scope); - { - __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9file_size_2generator2, NULL, (PyObject *) __pyx_cur_scope, __pyx_n_s_genexpr, __pyx_n_s_file_size_locals_genexpr, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2); if (unlikely(!gen)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_DECREF(__pyx_cur_scope); - __Pyx_RefNannyFinishContext(); - return (PyObject *) gen; - } - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.file_size.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9file_size_2generator2(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ -{ - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *__pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *)__pyx_generator->closure); - PyObject *__pyx_r = NULL; - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - Py_ssize_t __pyx_t_5; - PyObject *(*__pyx_t_6)(PyObject *); - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("genexpr", 0); - switch (__pyx_generator->resume_label) { - case 0: goto __pyx_L3_first_run; - case 1: goto __pyx_L7_resume_from_yield; - default: /* CPython raises the right error here */ - __Pyx_RefNannyFinishContext(); - return NULL; - } - __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 141, __pyx_L1_error) - if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_path)) { __Pyx_RaiseClosureNameError("path"); __PYX_ERR(0, 141, __pyx_L1_error) } - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_path, __pyx_n_s_glob); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_kp_u__16}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - if (likely(PyList_CheckExact(__pyx_t_1)) || PyTuple_CheckExact(__pyx_t_1)) { - __pyx_t_2 = __pyx_t_1; __Pyx_INCREF(__pyx_t_2); __pyx_t_5 = 0; - __pyx_t_6 = NULL; - } else { - __pyx_t_5 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 141, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 141, __pyx_L1_error) - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - for (;;) { - if (likely(!__pyx_t_6)) { - if (likely(PyList_CheckExact(__pyx_t_2))) { - if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_1 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_5); __Pyx_INCREF(__pyx_t_1); __pyx_t_5++; if (unlikely((0 < 0))) __PYX_ERR(0, 141, __pyx_L1_error) - #else - __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - #endif - } else { - if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_1 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_5); __Pyx_INCREF(__pyx_t_1); __pyx_t_5++; if (unlikely((0 < 0))) __PYX_ERR(0, 141, __pyx_L1_error) - #else - __pyx_t_1 = PySequence_ITEM(__pyx_t_2, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - #endif - } - } else { - __pyx_t_1 = __pyx_t_6(__pyx_t_2); - if (unlikely(!__pyx_t_1)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 141, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_1); - } - __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_f); - __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_f, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_f, __pyx_n_s_is_file); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_7, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_8 < 0))) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_8) { - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_f, __pyx_n_s_stat); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_7, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_3 = 
__Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_st_size); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - __Pyx_XGIVEREF(__pyx_t_2); - __pyx_cur_scope->__pyx_t_0 = __pyx_t_2; - __pyx_cur_scope->__pyx_t_1 = __pyx_t_5; - __pyx_cur_scope->__pyx_t_2 = __pyx_t_6; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - __Pyx_Coroutine_ResetAndClearException(__pyx_generator); - /* return from generator, yielding value */ - __pyx_generator->resume_label = 1; - return __pyx_r; - __pyx_L7_resume_from_yield:; - __pyx_t_2 = __pyx_cur_scope->__pyx_t_0; - __pyx_cur_scope->__pyx_t_0 = 0; - __Pyx_XGOTREF(__pyx_t_2); - __pyx_t_5 = __pyx_cur_scope->__pyx_t_1; - __pyx_t_6 = __pyx_cur_scope->__pyx_t_2; - if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 141, __pyx_L1_error) - } - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); - - /* function exit code */ - PyErr_SetNone(PyExc_StopIteration); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_Generator_Replace_StopIteration(0); - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_L0:; - __Pyx_XDECREF(__pyx_r); __pyx_r = 0; - #if !CYTHON_USE_EXC_INFO_STACK - __Pyx_Coroutine_ResetAndClearException(__pyx_generator); - #endif - __pyx_generator->resume_label = -1; - __Pyx_Coroutine_clear((PyObject*)__pyx_generator); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":135 - * - * - * def file_size(path): # <<<<<<<<<<<<<< - * # Return file/dir size (MB) - * path = Path(path) - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_24file_size(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_path) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *__pyx_cur_scope; - PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9file_size_2generator2 = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("file_size", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 135, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - __pyx_cur_scope->__pyx_v_path = __pyx_v_path; - __Pyx_INCREF(__pyx_cur_scope->__pyx_v_path); - __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_path); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":137 - * def file_size(path): - * # Return file/dir size (MB) - * path = Path(path) # <<<<<<<<<<<<<< - * if path.is_file(): - * return path.stat().st_size / 1E6 - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Path); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) - 
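/* Editor's note: reconstructed from the embedded source comments (general.py:135-143),
 * the whole helper that this generated body implements reduces to the following
 * pure-Python sketch; the glob pattern is again written as adjacent string literals
 * only to keep it legal inside a C comment:
 *
 *     from pathlib import Path
 *
 *     def file_size(path):
 *         # Return file/dir size (MB)
 *         path = Path(path)
 *         if path.is_file():
 *             return path.stat().st_size / 1E6
 *         elif path.is_dir():
 *             return sum(f.stat().st_size for f in path.glob('**' '/*') if f.is_file()) / 1E6
 *         else:
 *             return 0.0
 */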
__Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_cur_scope->__pyx_v_path}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_GOTREF(__pyx_cur_scope->__pyx_v_path); - __Pyx_DECREF_SET(__pyx_cur_scope->__pyx_v_path, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":138 - * # Return file/dir size (MB) - * path = Path(path) - * if path.is_file(): # <<<<<<<<<<<<<< - * return path.stat().st_size / 1E6 - * elif path.is_dir(): - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_path, __pyx_n_s_is_file); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":139 - * path = Path(path) - * if path.is_file(): - * return path.stat().st_size / 1E6 # <<<<<<<<<<<<<< - * elif path.is_dir(): - * return sum(f.stat().st_size for f in path.glob('**[inserted by cython to avoid comment closer]/[inserted by cython to avoid comment start]*') if f.is_file()) / 1E6 - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_path, __pyx_n_s_stat); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_st_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - 
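/* Editor's note: the division emitted below is st_size / 1E6 from general.py:139,
 * i.e. decimal megabytes (1 MB = 10**6 bytes), not binary mebibytes: a
 * 1_048_576-byte file reports 1.048576, not 1.0. */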
__pyx_t_1 = __Pyx_PyFloat_TrueDivideObjC(__pyx_t_2, __pyx_float_1E6, 1E6, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":138 - * # Return file/dir size (MB) - * path = Path(path) - * if path.is_file(): # <<<<<<<<<<<<<< - * return path.stat().st_size / 1E6 - * elif path.is_dir(): - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":140 - * if path.is_file(): - * return path.stat().st_size / 1E6 - * elif path.is_dir(): # <<<<<<<<<<<<<< - * return sum(f.stat().st_size for f in path.glob('**[inserted by cython to avoid comment closer]/[inserted by cython to avoid comment start]*') if f.is_file()) / 1E6 - * else: - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_cur_scope->__pyx_v_path, __pyx_n_s_is_dir); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 140, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_5) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":141 - * return path.stat().st_size / 1E6 - * elif path.is_dir(): - * return sum(f.stat().st_size for f in path.glob('**[inserted by cython to avoid comment closer]/[inserted by cython to avoid comment start]*') if f.is_file()) / 1E6 # <<<<<<<<<<<<<< - * else: - * return 0.0 - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9file_size_genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_sum, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyFloat_TrueDivideObjC(__pyx_t_2, __pyx_float_1E6, 1E6, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":140 - * if path.is_file(): - * return path.stat().st_size / 1E6 - * elif path.is_dir(): # <<<<<<<<<<<<<< - * return sum(f.stat().st_size for f in path.glob('**[inserted by cython to avoid comment closer]/[inserted by cython to avoid comment start]*') if f.is_file()) / 1E6 - * else: - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":143 - * return sum(f.stat().st_size for f in path.glob('**[inserted by cython to avoid comment closer]/[inserted by cython to avoid comment start]*') if f.is_file()) / 1E6 - * else: - * return 0.0 # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - 
__Pyx_INCREF(__pyx_float_0_0); - __pyx_r = __pyx_float_0_0; - goto __pyx_L0; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":135 - * - * - * def file_size(path): # <<<<<<<<<<<<<< - * # Return file/dir size (MB) - * path = Path(path) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.file_size", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9file_size_2generator2); - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":146 - * - * - * def check_python(minimum='3.6.2'): # <<<<<<<<<<<<<< - * # Check current python version vs. required python version - * check_version(platform.python_version(), minimum, name='Python ', hard=True) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_27check_python(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_27check_python = {"check_python", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_27check_python, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_27check_python(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_minimum = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("check_python (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_minimum,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_minimum,0}; - #endif - PyObject* values[1] = {0}; - values[0] = ((PyObject *)((PyObject*)__pyx_kp_u_3_6_2)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_minimum); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 146, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "check_python") < 0)) __PYX_ERR(0, 146, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto 
__pyx_L5_argtuple_error; - } - } - __pyx_v_minimum = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("check_python", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 146, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.check_python", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_26check_python(__pyx_self, __pyx_v_minimum); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_26check_python(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_minimum) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("check_python", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":148 - * def check_python(minimum='3.6.2'): - * # Check current python version vs. required python version - * check_version(platform.python_version(), minimum, name='Python ', hard=True) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_check_version); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_platform); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_python_version); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 0+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); - __Pyx_INCREF(__pyx_v_minimum); - __Pyx_GIVEREF(__pyx_v_minimum); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_minimum); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_name, __pyx_kp_u_Python) < 0) __PYX_ERR(0, 148, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_hard, Py_True) < 0) __PYX_ERR(0, 148, __pyx_L1_error) - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - 
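/* Editor's note: per the embedded source comments (general.py:146-153), the two
 * version helpers compiled in this region reduce to the sketch below. Note that in
 * this vendored copy check_version() is stubbed to always return True, so the
 * hard=True passed by check_python() can never actually trigger a failure:
 *
 *     import platform
 *
 *     def check_python(minimum='3.6.2'):
 *         # Check current python version vs. required python version
 *         check_version(platform.python_version(), minimum, name='Python ', hard=True)
 *
 *     def check_version(current='0.0.0', minimum='0.0.0', name='version ',
 *                       pinned=False, hard=False, verbose=False):
 *         # Check version vs. required version
 *         return True
 */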
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":146 - * - * - * def check_python(minimum='3.6.2'): # <<<<<<<<<<<<<< - * # Check current python version vs. required python version - * check_version(platform.python_version(), minimum, name='Python ', hard=True) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.check_python", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":151 - * - * - * def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # <<<<<<<<<<<<<< - * # Check version vs. required version - * return True - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_29check_version(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_29check_version = {"check_version", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_29check_version, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_29check_version(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - CYTHON_UNUSED PyObject *__pyx_v_current = 0; - CYTHON_UNUSED PyObject *__pyx_v_minimum = 0; - CYTHON_UNUSED PyObject *__pyx_v_name = 0; - CYTHON_UNUSED PyObject *__pyx_v_pinned = 0; - CYTHON_UNUSED PyObject *__pyx_v_hard = 0; - CYTHON_UNUSED PyObject *__pyx_v_verbose = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("check_version (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_current,&__pyx_n_s_minimum,&__pyx_n_s_name,&__pyx_n_s_pinned,&__pyx_n_s_hard,&__pyx_n_s_verbose,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_current,&__pyx_n_s_minimum,&__pyx_n_s_name,&__pyx_n_s_pinned,&__pyx_n_s_hard,&__pyx_n_s_verbose,0}; - #endif - PyObject* values[6] = {0,0,0,0,0,0}; - values[0] = ((PyObject *)((PyObject*)__pyx_kp_u_0_0_0)); - values[1] = ((PyObject *)((PyObject*)__pyx_kp_u_0_0_0)); - values[2] = ((PyObject *)((PyObject*)__pyx_kp_u_version)); - values[3] = ((PyObject *)((PyObject *)Py_False)); - values[4] = ((PyObject *)((PyObject *)Py_False)); - values[5] = ((PyObject *)((PyObject *)Py_False)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 6: values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); - CYTHON_FALLTHROUGH; - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: 
values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_current); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 151, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_minimum); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 151, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_name); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 151, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pinned); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 151, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_hard); - if (value) { values[4] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 151, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 5: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_verbose); - if (value) { values[5] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 151, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "check_version") < 0)) __PYX_ERR(0, 151, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 6: values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); - CYTHON_FALLTHROUGH; - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_current = values[0]; - __pyx_v_minimum = values[1]; - __pyx_v_name = values[2]; - __pyx_v_pinned = values[3]; - __pyx_v_hard = values[4]; - __pyx_v_verbose = values[5]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("check_version", 0, 0, 6, __pyx_nargs); __PYX_ERR(0, 151, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.check_version", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = 
__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_28check_version(__pyx_self, __pyx_v_current, __pyx_v_minimum, __pyx_v_name, __pyx_v_pinned, __pyx_v_hard, __pyx_v_verbose); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_28check_version(CYTHON_UNUSED PyObject *__pyx_self, CYTHON_UNUSED PyObject *__pyx_v_current, CYTHON_UNUSED PyObject *__pyx_v_minimum, CYTHON_UNUSED PyObject *__pyx_v_name, CYTHON_UNUSED PyObject *__pyx_v_pinned, CYTHON_UNUSED PyObject *__pyx_v_hard, CYTHON_UNUSED PyObject *__pyx_v_verbose) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("check_version", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":153 - * def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): - * # Check version vs. required version - * return True # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(Py_True); - __pyx_r = Py_True; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":151 - * - * - * def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # <<<<<<<<<<<<<< - * # Check version vs. required version - * return True - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":156 - * - * - * def check_img_size(imgsz, s=32, floor=0): # <<<<<<<<<<<<<< - * # Verify image size is a multiple of stride s in each dimension - * if isinstance(imgsz, int): # integer i.e. img_size=640 - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_31check_img_size(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_31check_img_size = {"check_img_size", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_31check_img_size, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_31check_img_size(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_imgsz = 0; - PyObject *__pyx_v_s = 0; - PyObject *__pyx_v_floor = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("check_img_size (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_imgsz,&__pyx_n_s_s,&__pyx_n_s_floor,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_imgsz,&__pyx_n_s_s,&__pyx_n_s_floor,0}; - #endif - PyObject* values[3] = {0,0,0}; - values[1] = ((PyObject *)((PyObject *)__pyx_int_32)); - values[2] = ((PyObject *)((PyObject *)__pyx_int_0)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) 
{ - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_imgsz)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 156, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_s); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 156, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_floor); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 156, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "check_img_size") < 0)) __PYX_ERR(0, 156, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_imgsz = values[0]; - __pyx_v_s = values[1]; - __pyx_v_floor = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("check_img_size", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 156, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.check_img_size", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_30check_img_size(__pyx_self, __pyx_v_imgsz, __pyx_v_s, __pyx_v_floor); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_30check_img_size(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_imgsz, PyObject *__pyx_v_s, PyObject *__pyx_v_floor) { - PyObject *__pyx_v_new_size = NULL; - PyObject *__pyx_8genexpr5__pyx_v_x = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - PyObject *(*__pyx_t_10)(PyObject *); - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - Py_UCS4 __pyx_t_13; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("check_img_size", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":158 - * def check_img_size(imgsz, s=32, floor=0): - * # Verify image size is a multiple of stride s in each dimension - * if isinstance(imgsz, int): # integer i.e. 
img_size=640 # <<<<<<<<<<<<<< - * new_size = max(make_divisible(imgsz, int(s)), floor) - * else: # list i.e. img_size=[640, 480] - */ - __pyx_t_1 = PyInt_Check(__pyx_v_imgsz); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":159 - * # Verify image size is a multiple of stride s in each dimension - * if isinstance(imgsz, int): # integer i.e. img_size=640 - * new_size = max(make_divisible(imgsz, int(s)), floor) # <<<<<<<<<<<<<< - * else: # list i.e. img_size=[640, 480] - * new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] - */ - __Pyx_INCREF(__pyx_v_floor); - __pyx_t_3 = __pyx_v_floor; - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_make_divisible); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 159, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyNumber_Int(__pyx_v_s); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 159, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_8 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_7, __pyx_v_imgsz, __pyx_t_6}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_8, 2+__pyx_t_8); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 159, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_6 = PyObject_RichCompare(__pyx_t_3, __pyx_t_4, Py_GT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 159, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 159, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (__pyx_t_2) { - __Pyx_INCREF(__pyx_t_3); - __pyx_t_5 = __pyx_t_3; - } else { - __Pyx_INCREF(__pyx_t_4); - __pyx_t_5 = __pyx_t_4; - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __pyx_t_5; - __Pyx_INCREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_new_size = __pyx_t_3; - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":158 - * def check_img_size(imgsz, s=32, floor=0): - * # Verify image size is a multiple of stride s in each dimension - * if isinstance(imgsz, int): # integer i.e. img_size=640 # <<<<<<<<<<<<<< - * new_size = max(make_divisible(imgsz, int(s)), floor) - * else: # list i.e. img_size=[640, 480] - */ - goto __pyx_L3; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":161 - * new_size = max(make_divisible(imgsz, int(s)), floor) - * else: # list i.e. 
img_size=[640, 480] - * new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] # <<<<<<<<<<<<<< - * if new_size != imgsz: - * LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') - */ - /*else*/ { - { /* enter inner scope */ - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - if (likely(PyList_CheckExact(__pyx_v_imgsz)) || PyTuple_CheckExact(__pyx_v_imgsz)) { - __pyx_t_5 = __pyx_v_imgsz; __Pyx_INCREF(__pyx_t_5); __pyx_t_9 = 0; - __pyx_t_10 = NULL; - } else { - __pyx_t_9 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_v_imgsz); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_10 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 161, __pyx_L6_error) - } - for (;;) { - if (likely(!__pyx_t_10)) { - if (likely(PyList_CheckExact(__pyx_t_5))) { - if (__pyx_t_9 >= PyList_GET_SIZE(__pyx_t_5)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_4 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_9); __Pyx_INCREF(__pyx_t_4); __pyx_t_9++; if (unlikely((0 < 0))) __PYX_ERR(0, 161, __pyx_L6_error) - #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_5, __pyx_t_9); __pyx_t_9++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - } else { - if (__pyx_t_9 >= PyTuple_GET_SIZE(__pyx_t_5)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_9); __Pyx_INCREF(__pyx_t_4); __pyx_t_9++; if (unlikely((0 < 0))) __PYX_ERR(0, 161, __pyx_L6_error) - #else - __pyx_t_4 = PySequence_ITEM(__pyx_t_5, __pyx_t_9); __pyx_t_9++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - } - } else { - __pyx_t_4 = __pyx_t_10(__pyx_t_5); - if (unlikely(!__pyx_t_4)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 161, __pyx_L6_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_4); - } - __Pyx_XDECREF_SET(__pyx_8genexpr5__pyx_v_x, __pyx_t_4); - __pyx_t_4 = 0; - __Pyx_INCREF(__pyx_v_floor); - __pyx_t_4 = __pyx_v_floor; - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_make_divisible); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_11 = __Pyx_PyNumber_Int(__pyx_v_s); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_12 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_12 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_12)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_12); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_8 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_12, __pyx_8genexpr5__pyx_v_x, __pyx_t_11}; - __pyx_t_6 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_8, 2+__pyx_t_8); - __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __pyx_t_11 = PyObject_RichCompare(__pyx_t_4, __pyx_t_6, Py_GT); __Pyx_XGOTREF(__pyx_t_11); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 161, __pyx_L6_error) - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_11); if 
(unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (__pyx_t_2) { - __Pyx_INCREF(__pyx_t_4); - __pyx_t_7 = __pyx_t_4; - } else { - __Pyx_INCREF(__pyx_t_6); - __pyx_t_7 = __pyx_t_6; - } - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_7))) __PYX_ERR(0, 161, __pyx_L6_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_8genexpr5__pyx_v_x); __pyx_8genexpr5__pyx_v_x = 0; - goto __pyx_L9_exit_scope; - __pyx_L6_error:; - __Pyx_XDECREF(__pyx_8genexpr5__pyx_v_x); __pyx_8genexpr5__pyx_v_x = 0; - goto __pyx_L1_error; - __pyx_L9_exit_scope:; - } /* exit inner scope */ - __pyx_v_new_size = __pyx_t_3; - __pyx_t_3 = 0; - } - __pyx_L3:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":162 - * else: # list i.e. img_size=[640, 480] - * new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] - * if new_size != imgsz: # <<<<<<<<<<<<<< - * LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') - * return new_size - */ - __pyx_t_3 = PyObject_RichCompare(__pyx_v_new_size, __pyx_v_imgsz, Py_NE); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 162, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 162, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_2) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":163 - * new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] - * if new_size != imgsz: - * LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') # <<<<<<<<<<<<<< - * return new_size - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_LOGGER); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_warning); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_9 = 0; - __pyx_t_13 = 127; - __Pyx_INCREF(__pyx_kp_u_WARNING_img_size); - __pyx_t_9 += 20; - __Pyx_GIVEREF(__pyx_kp_u_WARNING_img_size); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_WARNING_img_size); - __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_v_imgsz, __pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_13 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) > __pyx_t_13) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) : __pyx_t_13; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); - __pyx_t_4 = 0; - __Pyx_INCREF(__pyx_kp_u_must_be_multiple_of_max_stride); - __pyx_t_9 += 32; - __Pyx_GIVEREF(__pyx_kp_u_must_be_multiple_of_max_stride); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u_must_be_multiple_of_max_stride); - __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_v_s, __pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_13 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) > __pyx_t_13) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) : __pyx_t_13; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); - __pyx_t_4 = 0; - __Pyx_INCREF(__pyx_kp_u_updating_to); - __pyx_t_9 += 14; - __Pyx_GIVEREF(__pyx_kp_u_updating_to); - PyTuple_SET_ITEM(__pyx_t_5, 4, __pyx_kp_u_updating_to); - __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_v_new_size, __pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_13 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) > __pyx_t_13) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_4) : __pyx_t_13; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 5, __pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_5, 6, __pyx_t_9, __pyx_t_13); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_8 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_4}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_8, 1+__pyx_t_8); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 163, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":162 - * else: # list i.e. img_size=[640, 480] - * new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] - * if new_size != imgsz: # <<<<<<<<<<<<<< - * LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') - * return new_size - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":164 - * if new_size != imgsz: - * LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') - * return new_size # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_new_size); - __pyx_r = __pyx_v_new_size; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":156 - * - * - * def check_img_size(imgsz, s=32, floor=0): # <<<<<<<<<<<<<< - * # Verify image size is a multiple of stride s in each dimension - * if isinstance(imgsz, int): # integer i.e. img_size=640 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_XDECREF(__pyx_t_12); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.check_img_size", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_new_size); - __Pyx_XDECREF(__pyx_8genexpr5__pyx_v_x); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":167 - * - * - * def url2file(url): # <<<<<<<<<<<<<< - * # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt - * url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_33url2file(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_33url2file = {"url2file", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_33url2file, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_33url2file(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_url = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("url2file (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_url,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_url,0}; - #endif - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_url)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 167, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "url2file") < 0)) __PYX_ERR(0, 167, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_url = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("url2file", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 167, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.url2file", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_32url2file(__pyx_self, __pyx_v_url); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_32url2file(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_url) { - PyObject *__pyx_v_file = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 
= NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("url2file", 0); - __Pyx_INCREF(__pyx_v_url); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":169 - * def url2file(url): - * # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt - * url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ # <<<<<<<<<<<<<< - * file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth - * return file - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Path); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_url}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = __Pyx_PyObject_Str(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_replace); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF_SET(__pyx_v_url, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":170 - * # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt - * url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - * file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth # <<<<<<<<<<<<<< - * return file - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_Path); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_urllib); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_parse); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_unquote); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_v_url}; - __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __pyx_t_6 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_t_5}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_name); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_split); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_kp_u__20}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 170, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_file = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":171 - * url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - * file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth - * return file # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_file); - __pyx_r = __pyx_v_file; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":167 - * - * - * def url2file(url): # <<<<<<<<<<<<<< - * # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt - * url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.url2file", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_file); - __Pyx_XDECREF(__pyx_v_url); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":174 - * - * - * def make_divisible(x, divisor): # <<<<<<<<<<<<<< - * # Returns nearest x divisible by divisor - * if isinstance(divisor, torch.Tensor): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_35make_divisible(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_35make_divisible = {"make_divisible", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_35make_divisible, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_35make_divisible(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_divisor = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("make_divisible (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_divisor,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_divisor,0}; - #endif - PyObject* values[2] = {0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = 
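(Aside: the generated block above implements url2file. For reference, this is the Python it compiles, quoted verbatim from the embedded general.py:167-171 comments; the two import lines are inferred from the names used (Path, urllib.parse) and are not themselves quoted in the excerpt.)

    from pathlib import Path
    import urllib.parse

    def url2file(url):
        # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
        url = str(Path(url)).replace(':/', '://')  # Pathlib turns :// -> :/
        file = Path(urllib.parse.unquote(url)).name.split('?')[0]  # '%2F' to '/', split https://url.com/file.txt?auth
        return file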
__Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_x)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 174, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_divisor)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 174, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("make_divisible", 1, 2, 2, 1); __PYX_ERR(0, 174, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "make_divisible") < 0)) __PYX_ERR(0, 174, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_x = values[0]; - __pyx_v_divisor = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("make_divisible", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 174, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.make_divisible", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_34make_divisible(__pyx_self, __pyx_v_x, __pyx_v_divisor); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_34make_divisible(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_divisor) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("make_divisible", 0); - __Pyx_INCREF(__pyx_v_divisor); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":176 - * def make_divisible(x, divisor): - * # Returns nearest x divisible by divisor - * if isinstance(divisor, torch.Tensor): # <<<<<<<<<<<<<< - * divisor = int(divisor.max()) # to int - * return math.ceil(x / divisor) * divisor - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_torch); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_Tensor); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = PyObject_IsInstance(__pyx_v_divisor, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 176, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = (__pyx_t_3 != 0); - if (__pyx_t_4) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":177 - * # Returns nearest x divisible by divisor - * if isinstance(divisor, torch.Tensor): - * divisor = int(divisor.max()) # to int # <<<<<<<<<<<<<< - * return math.ceil(x / divisor) * divisor - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_divisor, __pyx_n_s_max); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = NULL; - __pyx_t_6 = 0; - if 
(CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_5, }; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_6, 0+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 177, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_divisor, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":176 - * def make_divisible(x, divisor): - * # Returns nearest x divisible by divisor - * if isinstance(divisor, torch.Tensor): # <<<<<<<<<<<<<< - * divisor = int(divisor.max()) # to int - * return math.ceil(x / divisor) * divisor - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":178 - * if isinstance(divisor, torch.Tensor): - * divisor = int(divisor.max()) # to int - * return math.ceil(x / divisor) * divisor # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_math); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 178, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_ceil); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 178, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_v_x, __pyx_v_divisor); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 178, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 1+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_5 = PyNumber_Multiply(__pyx_t_1, __pyx_v_divisor); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 178, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":174 - * - * - * def make_divisible(x, divisor): # <<<<<<<<<<<<<< - * # Returns nearest x divisible by divisor - * if isinstance(divisor, torch.Tensor): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.make_divisible", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_divisor); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* 
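(Aside: the make_divisible block above likewise compiles this short function, quoted from the embedded general.py:174-178 comments, with inferred imports.)

    import math
    import torch

    def make_divisible(x, divisor):
        # Returns nearest x divisible by divisor
        if isinstance(divisor, torch.Tensor):
            divisor = int(divisor.max())  # to int
        return math.ceil(x / divisor) * divisor

For example, make_divisible(100, 32) == 128, since ceil(100 / 32) * 32 = 4 * 32; in YOLOv5 this is typically used to round image sizes and channel widths up to a multiple of the model stride.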
"pdf_toolbox/lib/dia_yolov5/utils/general.py":181 - * - * - * def clean_str(s): # <<<<<<<<<<<<<< - * # Cleans a string by replacing special characters with underscore _ - * return re.sub(pattern="[|@#!$%&()=?^*;:,><+]", repl="_", string=s) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_37clean_str(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_37clean_str = {"clean_str", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_37clean_str, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_37clean_str(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_s = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("clean_str (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_s,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_s,0}; - #endif - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_s)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "clean_str") < 0)) __PYX_ERR(0, 181, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_s = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("clean_str", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 181, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.clean_str", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_36clean_str(__pyx_self, __pyx_v_s); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_36clean_str(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_s) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int 
__pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("clean_str", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":183 - * def clean_str(s): - * # Cleans a string by replacing special characters with underscore _ - * return re.sub(pattern="[|@#!$%&()=?^*;:,><+]", repl="_", string=s) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_re); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sub); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyDict_NewPresized(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_pattern, __pyx_kp_u__21) < 0) __PYX_ERR(0, 183, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_repl, __pyx_n_u__22) < 0) __PYX_ERR(0, 183, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_string, __pyx_v_s) < 0) __PYX_ERR(0, 183, __pyx_L1_error) - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 183, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":181 - * - * - * def clean_str(s): # <<<<<<<<<<<<<< - * # Cleans a string by replacing special characters with underscore _ - * return re.sub(pattern="[|@#!$%&()=?^*;:,><+]", repl="_", string=s) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.clean_str", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":186 - * - * - * def one_cycle(y1=0.0, y2=1.0, steps=100): # <<<<<<<<<<<<<< - * # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - * return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_39one_cycle(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_39one_cycle = {"one_cycle", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_39one_cycle, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_39one_cycle(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_y1 = 0; - PyObject *__pyx_v_y2 = 0; - PyObject *__pyx_v_steps = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, 
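(Aside: the clean_str block above compiles this one-liner, quoted from the embedded general.py:181-183 comments, with the import inferred. E.g. clean_str('a|b@c') == 'a_b_c'.)

    import re

    def clean_str(s):
        # Cleans a string by replacing special characters with underscore _
        return re.sub(pattern="[|@#!$%&()=?^*;:,><+]", repl="_", string=s)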
__pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("one_cycle (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_steps,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_y1,&__pyx_n_s_y2,&__pyx_n_s_steps,0}; - #endif - PyObject* values[3] = {0,0,0}; - values[0] = ((PyObject *)((PyObject*)__pyx_float_0_0)); - values[1] = ((PyObject *)((PyObject*)__pyx_float_1_0)); - values[2] = ((PyObject *)((PyObject *)__pyx_int_100)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_y1); - if (value) { values[0] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 186, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_y2); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 186, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_steps); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 186, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "one_cycle") < 0)) __PYX_ERR(0, 186, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_y1 = values[0]; - __pyx_v_y2 = values[1]; - __pyx_v_steps = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("one_cycle", 0, 0, 3, __pyx_nargs); __PYX_ERR(0, 186, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.one_cycle", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_38one_cycle(__pyx_self, __pyx_v_y1, __pyx_v_y2, __pyx_v_steps); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":188 - * def one_cycle(y1=0.0, y2=1.0, steps=100): - * # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - * return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 # <<<<<<<<<<<<<< - * - * - */ - -/* Python wrapper */ -static PyObject 
*__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9one_cycle_lambda3(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9one_cycle_lambda3 = {"lambda3", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9one_cycle_lambda3, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9one_cycle_lambda3(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_x = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("lambda3 (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,0}; - #endif - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_x)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 188, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "lambda3") < 0)) __PYX_ERR(0, 188, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_x = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("lambda3", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 188, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.one_cycle.lambda3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_lambda_funcdef_lambda3(__pyx_self, __pyx_v_x); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_lambda_funcdef_lambda3(PyObject *__pyx_self, PyObject *__pyx_v_x) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *__pyx_cur_scope; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *__pyx_outer_scope; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int 
__pyx_clineno = 0; - __Pyx_RefNannySetupContext("lambda3", 0); - __pyx_outer_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *) __Pyx_CyFunction_GetClosure(__pyx_self); - __pyx_cur_scope = __pyx_outer_scope; - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_math); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_cos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_math); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_pi); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Multiply(__pyx_v_x, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_cur_scope->__pyx_v_steps)) { __Pyx_RaiseClosureNameError("steps"); __PYX_ERR(0, 188, __pyx_L1_error) } - __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_cur_scope->__pyx_v_steps); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_4}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_3 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_t_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_3, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_cur_scope->__pyx_v_y2)) { __Pyx_RaiseClosureNameError("y2"); __PYX_ERR(0, 188, __pyx_L1_error) } - if (unlikely(!__pyx_cur_scope->__pyx_v_y1)) { __Pyx_RaiseClosureNameError("y1"); __PYX_ERR(0, 188, __pyx_L1_error) } - __pyx_t_3 = PyNumber_Subtract(__pyx_cur_scope->__pyx_v_y2, __pyx_cur_scope->__pyx_v_y1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyNumber_Multiply(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_cur_scope->__pyx_v_y1)) { __Pyx_RaiseClosureNameError("y1"); __PYX_ERR(0, 188, __pyx_L1_error) } - __pyx_t_3 = PyNumber_Add(__pyx_t_4, __pyx_cur_scope->__pyx_v_y1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.one_cycle.lambda3", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":186 - * - * - * def one_cycle(y1=0.0, y2=1.0, steps=100): # <<<<<<<<<<<<<< - * # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - * return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_38one_cycle(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_y1, PyObject *__pyx_v_y2, PyObject *__pyx_v_steps) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *__pyx_cur_scope; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("one_cycle", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 186, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - __pyx_cur_scope->__pyx_v_y1 = __pyx_v_y1; - __Pyx_INCREF(__pyx_cur_scope->__pyx_v_y1); - __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_y1); - __pyx_cur_scope->__pyx_v_y2 = __pyx_v_y2; - __Pyx_INCREF(__pyx_cur_scope->__pyx_v_y2); - __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_y2); - __pyx_cur_scope->__pyx_v_steps = __pyx_v_steps; - __Pyx_INCREF(__pyx_cur_scope->__pyx_v_steps); - __Pyx_GIVEREF(__pyx_cur_scope->__pyx_v_steps); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":188 - * def one_cycle(y1=0.0, y2=1.0, steps=100): - * # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - * return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9one_cycle_lambda3, 0, __pyx_n_s_one_cycle_locals_lambda, ((PyObject*)__pyx_cur_scope), __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 188, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":186 - * - * - * def one_cycle(y1=0.0, y2=1.0, steps=100): # <<<<<<<<<<<<<< - * # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - * return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.one_cycle", 
__pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":191 - * - * - * def colorstr(*input): # <<<<<<<<<<<<<< - * # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') - * *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_41colorstr(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_41colorstr = {"colorstr", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_41colorstr, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_41colorstr(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_input = 0; - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("colorstr (wrapper)", 0); - if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_VARARGS(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "colorstr", 0))) return NULL; - __Pyx_INCREF(__pyx_args); - __pyx_v_input = __pyx_args; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_40colorstr(__pyx_self, __pyx_v_input); - - /* function exit code */ - __Pyx_DECREF(__pyx_v_input); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8colorstr_2generator3(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":213 - * 'bold': '\033[1m', - * 'underline': '\033[4m'} - * return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] # <<<<<<<<<<<<<< - * - * - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8colorstr_genexpr(PyObject *__pyx_self) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *__pyx_cur_scope; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("genexpr", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 213, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *) __pyx_self; - __Pyx_INCREF((PyObject *)__pyx_cur_scope->__pyx_outer_scope); - 
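(Aside: one_cycle is the source of the closure machinery above: __pyx_scope_struct_7_one_cycle holds y1, y2 and steps so the returned lambda3 can reference them, which is why the C raises a ClosureNameError check before each access. The Python, quoted from the embedded general.py:186-188 comments, with the import inferred.)

    import math

    def one_cycle(y1=0.0, y2=1.0, steps=100):
        # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
        return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

The returned callable ramps smoothly from y1 at x == 0 to y2 at x == steps: the cosine learning-rate schedule from the paper linked in the comment.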
__Pyx_GIVEREF((PyObject *)__pyx_cur_scope->__pyx_outer_scope); - { - __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8colorstr_2generator3, NULL, (PyObject *) __pyx_cur_scope, __pyx_n_s_genexpr, __pyx_n_s_colorstr_locals_genexpr, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2); if (unlikely(!gen)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_DECREF(__pyx_cur_scope); - __Pyx_RefNannyFinishContext(); - return (PyObject *) gen; - } - - /* function exit code */ - __pyx_L1_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.colorstr.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8colorstr_2generator3(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ -{ - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *__pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *)__pyx_generator->closure); - PyObject *__pyx_r = NULL; - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("genexpr", 0); - switch (__pyx_generator->resume_label) { - case 0: goto __pyx_L3_first_run; - default: /* CPython raises the right error here */ - __Pyx_RefNannyFinishContext(); - return NULL; - } - __pyx_L3_first_run:; - if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 213, __pyx_L1_error) - __pyx_r = PyList_New(0); if (unlikely(!__pyx_r)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_r); - if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_args)) { __Pyx_RaiseClosureNameError("args"); __PYX_ERR(0, 213, __pyx_L1_error) } - if (unlikely(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_args == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); - __PYX_ERR(0, 213, __pyx_L1_error) - } - __pyx_t_1 = __pyx_cur_scope->__pyx_outer_scope->__pyx_v_args; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; - for (;;) { - if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_3 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely((0 < 0))) __PYX_ERR(0, 213, __pyx_L1_error) - #else - __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - #endif - __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_x); - __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_x, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_colors)) { __Pyx_RaiseClosureNameError("colors"); __PYX_ERR(0, 213, __pyx_L1_error) } - if (unlikely(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_colors == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(0, 213, __pyx_L1_error) - } - __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_colors, __pyx_cur_scope->__pyx_v_x); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if 
(unlikely(__Pyx_ListComp_Append(__pyx_r, (PyObject*)__pyx_t_3))) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_r); __pyx_r = 0; - __Pyx_Generator_Replace_StopIteration(0); - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - #if !CYTHON_USE_EXC_INFO_STACK - __Pyx_Coroutine_ResetAndClearException(__pyx_generator); - #endif - __pyx_generator->resume_label = -1; - __Pyx_Coroutine_clear((PyObject*)__pyx_generator); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":191 - * - * - * def colorstr(*input): # <<<<<<<<<<<<<< - * # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') - * *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_40colorstr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_input) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *__pyx_cur_scope; - PyObject *__pyx_v_string = NULL; - PyObject *__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8colorstr_2generator3 = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("colorstr", 0); - __pyx_cur_scope = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr, __pyx_empty_tuple, NULL); - if (unlikely(!__pyx_cur_scope)) { - __pyx_cur_scope = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *)Py_None); - __Pyx_INCREF(Py_None); - __PYX_ERR(0, 191, __pyx_L1_error) - } else { - __Pyx_GOTREF((PyObject *)__pyx_cur_scope); - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":193 - * def colorstr(*input): - * # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') - * *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string # <<<<<<<<<<<<<< - * colors = {'black': '\033[30m', # basic colors - * 'red': '\033[31m', - */ - __pyx_t_2 = PyTuple_GET_SIZE(__pyx_v_input); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 193, __pyx_L1_error) - if (((__pyx_t_2 > 1) != 0)) { - __Pyx_INCREF(__pyx_v_input); - __pyx_t_1 = __pyx_v_input; - } else { - __pyx_t_3 = __Pyx_GetItemInt_Tuple(__pyx_v_input, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 193, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 193, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_INCREF(__pyx_n_u_blue); - __Pyx_GIVEREF(__pyx_n_u_blue); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_n_u_blue); - __Pyx_INCREF(__pyx_n_u_bold); - __Pyx_GIVEREF(__pyx_n_u_bold); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_n_u_bold); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_1 = __pyx_t_4; - __pyx_t_4 = 0; - } - __pyx_t_4 = PySequence_List(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 193, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_2 = PyList_GET_SIZE(__pyx_t_4); - if (unlikely(__pyx_t_2 < 1)) { - __Pyx_RaiseNeedMoreValuesError(0+__pyx_t_2); __PYX_ERR(0, 193, __pyx_L1_error) - } - #if CYTHON_COMPILING_IN_CPYTHON - __pyx_t_3 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_2-1); - ((PyVarObject*)__pyx_t_4)->ob_size--; - #else - __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_2-1); - #endif - __Pyx_GOTREF(__pyx_t_3); - #if !CYTHON_COMPILING_IN_CPYTHON - __pyx_t_5 = PySequence_GetSlice(__pyx_t_4, 0, __pyx_t_2-1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 193, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); - __pyx_t_4 = __pyx_t_5; __pyx_t_5 = NULL; - #else - (void)__pyx_t_5; - #endif - __Pyx_GIVEREF(__pyx_t_4); - __pyx_cur_scope->__pyx_v_args = ((PyObject*)__pyx_t_4); - __pyx_t_4 = 0; - __pyx_v_string = __pyx_t_3; - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":194 - * # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') - * *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - * colors = {'black': '\033[30m', # basic colors # <<<<<<<<<<<<<< - * 'red': '\033[31m', - * 'green': '\033[32m', - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(19); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 194, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_black, __pyx_kp_u_30m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_red, __pyx_kp_u_31m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_green, __pyx_kp_u_32m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_yellow, __pyx_kp_u_33m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_blue, __pyx_kp_u_34m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_magenta, __pyx_kp_u_35m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_cyan, __pyx_kp_u_36m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_white, __pyx_kp_u_37m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bright_black, __pyx_kp_u_90m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bright_red, __pyx_kp_u_91m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bright_green, __pyx_kp_u_92m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bright_yellow, __pyx_kp_u_93m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bright_blue, __pyx_kp_u_94m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bright_magenta, __pyx_kp_u_95m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bright_cyan, __pyx_kp_u_96m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bright_white, __pyx_kp_u_97m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_end, __pyx_kp_u_0m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_bold, __pyx_kp_u_1m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_u_underline, __pyx_kp_u_4m) < 0) __PYX_ERR(0, 194, __pyx_L1_error) - __Pyx_GIVEREF(__pyx_t_1); - __pyx_cur_scope->__pyx_v_colors = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":213 - * 'bold': '\033[1m', - * 'underline': '\033[4m'} - * return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8colorstr_genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_Generator_Next(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyUnicode_Join(__pyx_kp_u__11, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_FormatSimple(__pyx_v_string, __pyx_empty_unicode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, 
__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_cur_scope->__pyx_v_colors, __pyx_n_u_end); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyNumber_Add(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 213, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":191 - * - * - * def colorstr(*input): # <<<<<<<<<<<<<< - * # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') - * *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.colorstr", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_string); - __Pyx_XDECREF(__pyx_gb_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_8colorstr_2generator3); - __Pyx_DECREF((PyObject *)__pyx_cur_scope); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":216 - * - * - * def labels_to_class_weights(labels, nc=80): # <<<<<<<<<<<<<< - * # Get class weights (inverse frequency) from training labels - * if labels[0] is None: # no labels loaded - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_43labels_to_class_weights(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_43labels_to_class_weights = {"labels_to_class_weights", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_43labels_to_class_weights, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_43labels_to_class_weights(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_labels = 0; - PyObject *__pyx_v_nc = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("labels_to_class_weights (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_labels,&__pyx_n_s_nc,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_labels,&__pyx_n_s_nc,0}; - #endif - PyObject* values[2] = {0,0}; - values[1] = ((PyObject *)((PyObject *)__pyx_int_80)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = 
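(Aside: the colorstr block above is the largest of these. The *args/string unpacking, the 19-entry colors dict (built via PyDict_NewPresized(19) and the PyDict_SetItem calls) and the generator expression all map back to the Python below, reconstructed from the embedded general.py:191-213 comments plus the interned string constants; __pyx_kp_u_30m and friends are the '\033[30m'-style ANSI codes, so the middle dict entries are inferred from those names rather than quoted directly.)

    def colorstr(*input):
        # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
        *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
        colors = {'black': '\033[30m',  # basic colors
                  'red': '\033[31m',
                  'green': '\033[32m',
                  'yellow': '\033[33m',
                  'blue': '\033[34m',
                  'magenta': '\033[35m',
                  'cyan': '\033[36m',
                  'white': '\033[37m',
                  'bright_black': '\033[90m',  # bright colors
                  'bright_red': '\033[91m',
                  'bright_green': '\033[92m',
                  'bright_yellow': '\033[93m',
                  'bright_blue': '\033[94m',
                  'bright_magenta': '\033[95m',
                  'bright_cyan': '\033[96m',
                  'bright_white': '\033[97m',
                  'end': '\033[0m',  # misc
                  'bold': '\033[1m',
                  'underline': '\033[4m'}
        return ''.join(colors[x] for x in args) + f'{string}' + colors['end']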
__Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_labels)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 216, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_nc); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 216, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "labels_to_class_weights") < 0)) __PYX_ERR(0, 216, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_labels = values[0]; - __pyx_v_nc = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("labels_to_class_weights", 0, 1, 2, __pyx_nargs); __PYX_ERR(0, 216, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.labels_to_class_weights", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_42labels_to_class_weights(__pyx_self, __pyx_v_labels, __pyx_v_nc); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_42labels_to_class_weights(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_labels, PyObject *__pyx_v_nc) { - PyObject *__pyx_v_classes = NULL; - PyObject *__pyx_v_weights = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("labels_to_class_weights", 0); - __Pyx_INCREF(__pyx_v_labels); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":218 - * def labels_to_class_weights(labels, nc=80): - * # Get class weights (inverse frequency) from training labels - * if labels[0] is None: # no labels loaded # <<<<<<<<<<<<<< - * return torch.Tensor() - * - */ - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_labels, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 218, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = (__pyx_t_1 == Py_None); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = (__pyx_t_2 != 0); - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":219 - * # Get class weights (inverse frequency) from training labels - * if labels[0] is None: # no labels loaded - * return torch.Tensor() # <<<<<<<<<<<<<< - * - * labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - */ - __Pyx_XDECREF(__pyx_r); - 
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_torch); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 219, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_Tensor); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 219, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_4, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 0+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 219, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":218 - * def labels_to_class_weights(labels, nc=80): - * # Get class weights (inverse frequency) from training labels - * if labels[0] is None: # no labels loaded # <<<<<<<<<<<<<< - * return torch.Tensor() - * - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":221 - * return torch.Tensor() - * - * labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO # <<<<<<<<<<<<<< - * classes = labels[:, 0].astype(np.int) # labels = [class xywh] - * weights = np.bincount(classes, minlength=nc) # occurrences per class - */ - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 221, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_concatenate); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 221, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_5, __pyx_v_labels, __pyx_int_0}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_6, 2+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 221, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __Pyx_DECREF_SET(__pyx_v_labels, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":222 - * - * labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - * classes = labels[:, 0].astype(np.int) # labels = [class xywh] # <<<<<<<<<<<<<< - * weights = np.bincount(classes, minlength=nc) # occurrences per class - * - */ - __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_tuple__24); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_astype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 222, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_4); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_7}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 1+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_v_classes = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":223 - * labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - * classes = labels[:, 0].astype(np.int) # labels = [class xywh] - * weights = np.bincount(classes, minlength=nc) # occurrences per class # <<<<<<<<<<<<<< - * - * # Prepend gridpoint count (for uCE training) - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_bincount); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_classes); - __Pyx_GIVEREF(__pyx_v_classes); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_classes); - __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_minlength, __pyx_v_nc) < 0) __PYX_ERR(0, 223, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_1, __pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_v_weights = __pyx_t_4; - __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":229 - * # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start - * - * weights[weights == 0] = 1 # replace empty bins with 1 # <<<<<<<<<<<<<< - * weights = 1 / weights # number of targets per class - * weights /= weights.sum() # normalize - */ - __pyx_t_4 = __Pyx_PyInt_EqObjC(__pyx_v_weights, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 229, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (unlikely((PyObject_SetItem(__pyx_v_weights, __pyx_t_4, __pyx_int_1) < 0))) __PYX_ERR(0, 229, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":230 - * - * weights[weights == 0] = 1 # replace empty bins with 1 - * weights = 1 / weights # number of targets per class # <<<<<<<<<<<<<< - * weights /= weights.sum() # normalize - * return torch.from_numpy(weights) - */ - __pyx_t_4 = __Pyx_PyNumber_Divide(__pyx_int_1, __pyx_v_weights); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 230, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF_SET(__pyx_v_weights, __pyx_t_4); - __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":231 - * weights[weights == 0] = 1 # replace empty bins with 1 - * weights = 1 / weights # number of targets per class - * weights /= weights.sum() # normalize # <<<<<<<<<<<<<< - * return torch.from_numpy(weights) - * - */ - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_weights, __pyx_n_s_sum); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 231, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_1 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_1, }; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_6, 0+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 231, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __pyx_t_7 = __Pyx_PyNumber_InPlaceDivide(__pyx_v_weights, __pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 231, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF_SET(__pyx_v_weights, __pyx_t_7); - __pyx_t_7 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":232 - * weights = 1 / weights # number of targets per class - * weights /= weights.sum() # normalize - * return torch.from_numpy(weights) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_torch); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 232, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_from_numpy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 232, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_weights}; - __pyx_t_7 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_6, 1+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 232, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_r = __pyx_t_7; - __pyx_t_7 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":216 - * - * - * def labels_to_class_weights(labels, nc=80): # <<<<<<<<<<<<<< - * # Get class weights (inverse frequency) from training labels - * if labels[0] is None: # no labels loaded - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.labels_to_class_weights", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_classes); - __Pyx_XDECREF(__pyx_v_weights); - __Pyx_XDECREF(__pyx_v_labels); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; 
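
[Reference note] The generated block above compiles `labels_to_class_weights` from pdf_toolbox/lib/dia_yolov5/utils/general.py; its Python source can be read back out of the source comments Cython embeds in the C. A sketch of that source follows — one caveat: the original's `np.int` alias is deprecated since NumPy 1.20 and removed in 1.24, so plain `int` is used as the drop-in replacement:

    import numpy as np
    import torch

    def labels_to_class_weights(labels, nc=80):
        # Get class weights (inverse frequency) from training labels
        if labels[0] is None:  # no labels loaded
            return torch.Tensor()

        labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
        classes = labels[:, 0].astype(int)  # labels = [class xywh]; original used the deprecated np.int
        weights = np.bincount(classes, minlength=nc)  # occurrences per class

        weights[weights == 0] = 1       # replace empty bins with 1
        weights = 1 / weights           # number of targets per class
        weights /= weights.sum()        # normalize
        return torch.from_numpy(weights)

Inverse-frequency weighting gives rare classes proportionally larger weights, the standard way to rebalance a loss against class imbalance.
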
-} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":235 - * - * - * def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): # <<<<<<<<<<<<<< - * # Produces image weights based on class_weights and image contents - * class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) - */ - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_66__defaults__(CYTHON_UNUSED PyObject *__pyx_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__defaults__", 0); - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)__pyx_int_80)); - __Pyx_GIVEREF(((PyObject *)__pyx_int_80)); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_int_80)); - __Pyx_INCREF(__Pyx_CyFunction_Defaults(__pyx_defaults1, __pyx_self)->__pyx_arg_class_weights); - __Pyx_GIVEREF(__Pyx_CyFunction_Defaults(__pyx_defaults1, __pyx_self)->__pyx_arg_class_weights); - PyTuple_SET_ITEM(__pyx_t_1, 1, __Pyx_CyFunction_Defaults(__pyx_defaults1, __pyx_self)->__pyx_arg_class_weights); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None); - __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.__defaults__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_45labels_to_image_weights(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_45labels_to_image_weights = {"labels_to_image_weights", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_45labels_to_image_weights, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_45labels_to_image_weights(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_labels = 0; - PyObject *__pyx_v_nc = 0; - PyObject *__pyx_v_class_weights = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("labels_to_image_weights (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s_labels,&__pyx_n_s_nc,&__pyx_n_s_class_weights,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_labels,&__pyx_n_s_nc,&__pyx_n_s_class_weights,0}; - #endif - PyObject* values[3] = {0,0,0}; - __pyx_defaults1 *__pyx_dynamic_args = __Pyx_CyFunction_Defaults(__pyx_defaults1, __pyx_self); - values[1] = ((PyObject *)((PyObject *)__pyx_int_80)); - values[2] = __pyx_dynamic_args->__pyx_arg_class_weights; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_labels)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 235, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_nc); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 235, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_class_weights); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 235, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "labels_to_image_weights") < 0)) __PYX_ERR(0, 235, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_labels = values[0]; - __pyx_v_nc = values[1]; - __pyx_v_class_weights = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("labels_to_image_weights", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 235, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.labels_to_image_weights", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_44labels_to_image_weights(__pyx_self, __pyx_v_labels, __pyx_v_nc, __pyx_v_class_weights); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_44labels_to_image_weights(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_labels, PyObject *__pyx_v_nc, PyObject *__pyx_v_class_weights) { - PyObject *__pyx_v_class_counts = NULL; - PyObject *__pyx_v_image_weights = NULL; - PyObject *__pyx_8genexpr7__pyx_v_x = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - Py_ssize_t __pyx_t_5; - PyObject 
*(*__pyx_t_6)(PyObject *); - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - int __pyx_t_12; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("labels_to_image_weights", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":237 - * def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): - * # Produces image weights based on class_weights and image contents - * class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) # <<<<<<<<<<<<<< - * image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) - * # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_array); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - { /* enter inner scope */ - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_2); - if (likely(PyList_CheckExact(__pyx_v_labels)) || PyTuple_CheckExact(__pyx_v_labels)) { - __pyx_t_4 = __pyx_v_labels; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; - __pyx_t_6 = NULL; - } else { - __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_labels); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 237, __pyx_L5_error) - } - for (;;) { - if (likely(!__pyx_t_6)) { - if (likely(PyList_CheckExact(__pyx_t_4))) { - if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely((0 < 0))) __PYX_ERR(0, 237, __pyx_L5_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } else { - if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely((0 < 0))) __PYX_ERR(0, 237, __pyx_L5_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } - } else { - __pyx_t_7 = __pyx_t_6(__pyx_t_4); - if (unlikely(!__pyx_t_7)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 237, __pyx_L5_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_7); - } - __Pyx_XDECREF_SET(__pyx_8genexpr7__pyx_v_x, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_bincount); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_8genexpr7__pyx_v_x, 
__pyx_tuple__24); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_astype); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_np); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_int); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = NULL; - __pyx_t_12 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_10))) { - __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_10); - if (likely(__pyx_t_9)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); - __Pyx_INCREF(__pyx_t_9); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_10, function); - __pyx_t_12 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_9, __pyx_t_11}; - __pyx_t_7 = __Pyx_PyObject_FastCall(__pyx_t_10, __pyx_callargs+1-__pyx_t_12, 1+__pyx_t_12); - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - } - __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7); - __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_7); - if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_minlength, __pyx_v_nc) < 0) __PYX_ERR(0, 237, __pyx_L5_error) - __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_t_10, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_11))) __PYX_ERR(0, 237, __pyx_L5_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_8genexpr7__pyx_v_x); __pyx_8genexpr7__pyx_v_x = 0; - goto __pyx_L8_exit_scope; - __pyx_L5_error:; - __Pyx_XDECREF(__pyx_8genexpr7__pyx_v_x); __pyx_8genexpr7__pyx_v_x = 0; - goto __pyx_L1_error; - __pyx_L8_exit_scope:; - } /* exit inner scope */ - __pyx_t_4 = NULL; - __pyx_t_12 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_12 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_12, 1+__pyx_t_12); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_v_class_counts = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":238 - * # Produces image weights based on class_weights and image contents - * class_counts = np.array([np.bincount(x[:, 0].astype(np.int), 
minlength=nc) for x in labels]) - * image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) # <<<<<<<<<<<<<< - * # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample - * return image_weights - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_class_weights, __pyx_n_s_reshape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 238, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - __pyx_t_12 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_12 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_int_1, __pyx_v_nc}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_12, 2+__pyx_t_12); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 238, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = PyNumber_Multiply(__pyx_t_3, __pyx_v_class_counts); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 238, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sum); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 238, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_12 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_12 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_int_1}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_12, 1+__pyx_t_12); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 238, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_v_image_weights = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":240 - * image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) - * # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample - * return image_weights # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_image_weights); - __pyx_r = __pyx_v_image_weights; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":235 - * - * - * def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): # <<<<<<<<<<<<<< - * # Produces image weights based on class_weights and image contents - * class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.labels_to_image_weights", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_class_counts); - __Pyx_XDECREF(__pyx_v_image_weights); - __Pyx_XDECREF(__pyx_8genexpr7__pyx_v_x); - 
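
[Reference note] The surrounding block compiles `labels_to_image_weights`; its source is likewise recoverable from the embedded comments. A sketch, with the same `np.int` → `int` substitution:

    import numpy as np

    def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
        # Produces image weights based on class_weights and image contents
        class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
        image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
        # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
        return image_weights

Each image's weight is the dot product of its per-class instance counts with `class_weights`, so images containing many instances of highly weighted (rare) classes can be sampled more often during training.
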
__Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":243 - * - * - * def xyxy2xywh(x): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_47xyxy2xywh(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_47xyxy2xywh = {"xyxy2xywh", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_47xyxy2xywh, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_47xyxy2xywh(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_x = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("xyxy2xywh (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,0}; - #endif - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_x)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 243, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "xyxy2xywh") < 0)) __PYX_ERR(0, 243, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_x = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("xyxy2xywh", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 243, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xyxy2xywh", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_46xyxy2xywh(__pyx_self, __pyx_v_x); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_46xyxy2xywh(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x) { - PyObject *__pyx_v_y = NULL; - PyObject *__pyx_r 
= NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("xyxy2xywh", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":245 - * def xyxy2xywh(x): - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) # <<<<<<<<<<<<<< - * y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - * y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_torch); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 245, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_Tensor); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 245, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = PyObject_IsInstance(__pyx_v_x, __pyx_t_3); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 245, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if ((__pyx_t_4 != 0)) { - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_clone); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 245, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_5, }; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_6, 0+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 245, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } else { - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 245, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_copy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 245, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_x}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 1+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 245, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } - __pyx_v_y = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":246 - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center # <<<<<<<<<<<<<< - * y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - 
* y[:, 2] = x[:, 2] - x[:, 0] # width - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__25); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = PyNumber_Add(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_5, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__24, __pyx_t_3) < 0))) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":247 - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - * y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center # <<<<<<<<<<<<<< - * y[:, 2] = x[:, 2] - x[:, 0] # width - * y[:, 3] = x[:, 3] - x[:, 1] # height - */ - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 247, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__27); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 247, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 247, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_1, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 247, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__26, __pyx_t_5) < 0))) __PYX_ERR(0, 247, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":248 - * y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - * y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - * y[:, 2] = x[:, 2] - x[:, 0] # width # <<<<<<<<<<<<<< - * y[:, 3] = x[:, 3] - x[:, 1] # height - * return y - */ - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__25); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 248, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 248, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Subtract(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 248, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__25, __pyx_t_3) < 0))) __PYX_ERR(0, 248, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":249 - * y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - * y[:, 2] = x[:, 2] - x[:, 0] # width - * y[:, 3] = x[:, 3] - x[:, 1] # height # <<<<<<<<<<<<<< - * return y - * - */ - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__27); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = PyNumber_Subtract(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__27, __pyx_t_5) < 0))) __PYX_ERR(0, 249, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":250 - * y[:, 2] = x[:, 2] - x[:, 0] # width - * y[:, 3] = x[:, 3] - x[:, 1] # height - * return y # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_y); - __pyx_r = __pyx_v_y; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":243 - * - * - * def xyxy2xywh(x): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xyxy2xywh", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_y); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":253 - * - * - * def xywh2xyxy(x): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_49xywh2xyxy(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_49xywh2xyxy = {"xywh2xyxy", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_49xywh2xyxy, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_49xywh2xyxy(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_x = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("xywh2xyxy (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,0}; - #endif - PyObject* values[1] = {0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, 
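
[Reference note] The generated code for `xyxy2xywh` ends just above (the `xywh2xyxy` wrapper opens on the same line). Reconstructed from the embedded source comments:

    import numpy as np
    import torch

    def xyxy2xywh(x):
        # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
        y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
        y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
        y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
        y[:, 2] = x[:, 2] - x[:, 0]        # width
        y[:, 3] = x[:, 3] - x[:, 1]        # height
        return y
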
__pyx_kwvalues, __pyx_n_s_x)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 253, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "xywh2xyxy") < 0)) __PYX_ERR(0, 253, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 1)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - } - __pyx_v_x = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("xywh2xyxy", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 253, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xywh2xyxy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_48xywh2xyxy(__pyx_self, __pyx_v_x); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_48xywh2xyxy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x) { - PyObject *__pyx_v_y = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("xywh2xyxy", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":255 - * def xywh2xyxy(x): - * # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) # <<<<<<<<<<<<<< - * y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - * y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_torch); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 255, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_Tensor); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 255, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = PyObject_IsInstance(__pyx_v_x, __pyx_t_3); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 255, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if ((__pyx_t_4 != 0)) { - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_clone); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 255, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_5, }; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_6, 0+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 255, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } else { - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 255, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_copy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 255, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_x}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 1+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 255, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } - __pyx_v_y = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":256 - * # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x # <<<<<<<<<<<<<< - * y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - * y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 256, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__25); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 256, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_3, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 256, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 256, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__24, __pyx_t_3) < 0))) __PYX_ERR(0, 256, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":257 - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - * y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y # <<<<<<<<<<<<<< - * y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - * y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y - */ - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 257, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__27); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 257, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_5, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 257, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Subtract(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 257, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__26, __pyx_t_5) < 0))) __PYX_ERR(0, 257, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - 
- /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":258 - * y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - * y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - * y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x # <<<<<<<<<<<<<< - * y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y - * return y - */ - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__25); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_1, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__25, __pyx_t_1) < 0))) __PYX_ERR(0, 258, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":259 - * y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - * y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - * y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y # <<<<<<<<<<<<<< - * return y - * - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__27); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_3, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__27, __pyx_t_3) < 0))) __PYX_ERR(0, 259, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":260 - * y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - * y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y - * return y # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_y); - __pyx_r = __pyx_v_y; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":253 - * - * - * def xywh2xyxy(x): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xywh2xyxy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_y); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":263 - * - * - * def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right 
- * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_51xywhn2xyxy(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_51xywhn2xyxy = {"xywhn2xyxy", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_51xywhn2xyxy, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_51xywhn2xyxy(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_w = 0; - PyObject *__pyx_v_h = 0; - PyObject *__pyx_v_padw = 0; - PyObject *__pyx_v_padh = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("xywhn2xyxy (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_w,&__pyx_n_s_h,&__pyx_n_s_padw,&__pyx_n_s_padh,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_w,&__pyx_n_s_h,&__pyx_n_s_padw,&__pyx_n_s_padh,0}; - #endif - PyObject* values[5] = {0,0,0,0,0}; - values[1] = ((PyObject *)((PyObject *)__pyx_int_640)); - values[2] = ((PyObject *)((PyObject *)__pyx_int_640)); - values[3] = ((PyObject *)((PyObject *)__pyx_int_0)); - values[4] = ((PyObject *)((PyObject *)__pyx_int_0)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_x)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 263, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_w); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 263, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_h); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 263, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_padw); - if (value) { values[3] = 
value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 263, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_padh); - if (value) { values[4] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 263, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "xywhn2xyxy") < 0)) __PYX_ERR(0, 263, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_x = values[0]; - __pyx_v_w = values[1]; - __pyx_v_h = values[2]; - __pyx_v_padw = values[3]; - __pyx_v_padh = values[4]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("xywhn2xyxy", 0, 1, 5, __pyx_nargs); __PYX_ERR(0, 263, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xywhn2xyxy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_50xywhn2xyxy(__pyx_self, __pyx_v_x, __pyx_v_w, __pyx_v_h, __pyx_v_padw, __pyx_v_padh); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_50xywhn2xyxy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_w, PyObject *__pyx_v_h, PyObject *__pyx_v_padw, PyObject *__pyx_v_padh) { - PyObject *__pyx_v_y = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("xywhn2xyxy", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":265 - * def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): - * # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) # <<<<<<<<<<<<<< - * y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - * y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_torch); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 265, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_Tensor); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 265, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = PyObject_IsInstance(__pyx_v_x, __pyx_t_3); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 265, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if ((__pyx_t_4 != 0)) { - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_clone); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 
265, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_5, }; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_6, 0+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 265, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } else { - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 265, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_copy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 265, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_x}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 1+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 265, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } - __pyx_v_y = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":266 - * # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x # <<<<<<<<<<<<<< - * y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - * y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 266, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__25); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 266, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_3, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 266, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 266, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Multiply(__pyx_v_w, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 266, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Add(__pyx_t_5, __pyx_v_padw); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 266, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__24, __pyx_t_3) < 0))) __PYX_ERR(0, 266, __pyx_L1_error) - 
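/* Editor's note, not Cython output: __pyx_tuple__24/__25/__26/__27 are the
 * module-level cached index tuples (slice(None), 0), (slice(None), 2),
 * (slice(None), 1) and (slice(None), 3), so the block above is the compiled
 * form of `y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw`. A worked example of
 * the Python-level function, with illustrative values:
 *     xywhn2xyxy(np.array([[0.5, 0.5, 0.2, 0.2]]), w=640, h=640)
 *     # -> [[256., 256., 384., 384.]]   (640 * (0.5 - 0.1) = 256,
 *     #                                  640 * (0.5 + 0.1) = 384)
 */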
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":267 - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - * y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y # <<<<<<<<<<<<<< - * y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - * y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y - */ - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 267, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__27); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 267, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_5, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 267, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Subtract(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 267, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Multiply(__pyx_v_h, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 267, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Add(__pyx_t_1, __pyx_v_padh); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 267, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__26, __pyx_t_5) < 0))) __PYX_ERR(0, 267, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":268 - * y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - * y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - * y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x # <<<<<<<<<<<<<< - * y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y - * return y - */ - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__25); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_1, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Multiply(__pyx_v_w, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_v_padw); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 268, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__25, __pyx_t_1) < 0))) __PYX_ERR(0, 268, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":269 - * y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - * y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - * y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y # 
<<<<<<<<<<<<<< - * return y - * - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 269, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__27); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 269, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_3, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 269, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 269, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Multiply(__pyx_v_h, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 269, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Add(__pyx_t_5, __pyx_v_padh); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 269, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__27, __pyx_t_3) < 0))) __PYX_ERR(0, 269, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":270 - * y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - * y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y - * return y # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_y); - __pyx_r = __pyx_v_y; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":263 - * - * - * def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xywhn2xyxy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_y); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":273 - * - * - * def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - * if clip: - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_53xyxy2xywhn(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_53xyxy2xywhn = {"xyxy2xywhn", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_53xyxy2xywhn, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_53xyxy2xywhn(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_w = 0; - PyObject 
*__pyx_v_h = 0; - PyObject *__pyx_v_clip = 0; - PyObject *__pyx_v_eps = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("xyxy2xywhn (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_w,&__pyx_n_s_h,&__pyx_n_s_clip,&__pyx_n_s_eps,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_w,&__pyx_n_s_h,&__pyx_n_s_clip,&__pyx_n_s_eps,0}; - #endif - PyObject* values[5] = {0,0,0,0,0}; - values[1] = ((PyObject *)((PyObject *)__pyx_int_640)); - values[2] = ((PyObject *)((PyObject *)__pyx_int_640)); - values[3] = ((PyObject *)((PyObject *)Py_False)); - values[4] = ((PyObject *)((PyObject*)__pyx_float_0_0)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_x)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 273, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_w); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 273, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_h); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 273, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_clip); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 273, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_eps); - if (value) { values[4] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 273, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "xyxy2xywhn") < 0)) __PYX_ERR(0, 273, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = 
__Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_x = values[0]; - __pyx_v_w = values[1]; - __pyx_v_h = values[2]; - __pyx_v_clip = values[3]; - __pyx_v_eps = values[4]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("xyxy2xywhn", 0, 1, 5, __pyx_nargs); __PYX_ERR(0, 273, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xyxy2xywhn", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_52xyxy2xywhn(__pyx_self, __pyx_v_x, __pyx_v_w, __pyx_v_h, __pyx_v_clip, __pyx_v_eps); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_52xyxy2xywhn(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_w, PyObject *__pyx_v_h, PyObject *__pyx_v_clip, PyObject *__pyx_v_eps) { - PyObject *__pyx_v_y = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("xyxy2xywhn", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":275 - * def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - * if clip: # <<<<<<<<<<<<<< - * clip_coords(x, (h - eps, w - eps)) # warning: inplace clip - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_clip); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 275, __pyx_L1_error) - if (__pyx_t_1) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":276 - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - * if clip: - * clip_coords(x, (h - eps, w - eps)) # warning: inplace clip # <<<<<<<<<<<<<< - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_clip_coords); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 276, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyNumber_Subtract(__pyx_v_h, __pyx_v_eps); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 276, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_w, __pyx_v_eps); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 276, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 276, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5); - __pyx_t_4 = 0; - __pyx_t_5 = 0; - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_5, 
__pyx_v_x, __pyx_t_6}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 276, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":275 - * def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - * if clip: # <<<<<<<<<<<<<< - * clip_coords(x, (h - eps, w - eps)) # warning: inplace clip - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":277 - * if clip: - * clip_coords(x, (h - eps, w - eps)) # warning: inplace clip - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) # <<<<<<<<<<<<<< - * y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - * y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_torch); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_Tensor); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_1 = PyObject_IsInstance(__pyx_v_x, __pyx_t_6); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if ((__pyx_t_1 != 0)) { - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_clone); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_5, }; - __pyx_t_6 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_7, 0+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_2 = __pyx_t_6; - __pyx_t_6 = 0; - } else { - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_copy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_x}; - __pyx_t_6 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 277, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_2 = 
__pyx_t_6; - __pyx_t_6 = 0; - } - __pyx_v_y = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":278 - * clip_coords(x, (h - eps, w - eps)) # warning: inplace clip - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center # <<<<<<<<<<<<<< - * y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - * y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 278, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__25); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 278, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = PyNumber_Add(__pyx_t_2, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 278, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_5, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 278, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_t_6, __pyx_v_w); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 278, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__24, __pyx_t_5) < 0))) __PYX_ERR(0, 278, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":279 - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - * y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center # <<<<<<<<<<<<<< - * y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - * y[:, 3] = (x[:, 3] - x[:, 1]) / h # height - */ - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 279, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__27); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 279, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_2 = PyNumber_Add(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 279, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_2, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 279, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_6, __pyx_v_h); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 279, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__26, __pyx_t_2) < 0))) __PYX_ERR(0, 279, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":280 - * y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - * y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - * y[:, 2] = (x[:, 2] - x[:, 0]) / w # width # <<<<<<<<<<<<<< - * y[:, 3] = (x[:, 3] - x[:, 1]) / h # height - * return y - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__25); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 280, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 280, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = 
PyNumber_Subtract(__pyx_t_2, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 280, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyNumber_Divide(__pyx_t_5, __pyx_v_w); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 280, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__25, __pyx_t_6) < 0))) __PYX_ERR(0, 280, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":281 - * y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - * y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - * y[:, 3] = (x[:, 3] - x[:, 1]) / h # height # <<<<<<<<<<<<<< - * return y - * - */ - __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__27); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 281, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 281, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyNumber_Subtract(__pyx_t_6, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 281, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_v_h); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 281, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__27, __pyx_t_5) < 0))) __PYX_ERR(0, 281, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":282 - * y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - * y[:, 3] = (x[:, 3] - x[:, 1]) / h # height - * return y # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_y); - __pyx_r = __pyx_v_y; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":273 - * - * - * def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - * if clip: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xyxy2xywhn", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_y); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":285 - * - * - * def xyn2xy(x, w=640, h=640, padw=0, padh=0): # <<<<<<<<<<<<<< - * # Convert normalized segments into pixel segments, shape (n,2) - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_55xyn2xy(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_55xyn2xy = {"xyn2xy", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_55xyn2xy, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject 
*__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_55xyn2xy(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_x = 0; - PyObject *__pyx_v_w = 0; - PyObject *__pyx_v_h = 0; - PyObject *__pyx_v_padw = 0; - PyObject *__pyx_v_padh = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("xyn2xy (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_w,&__pyx_n_s_h,&__pyx_n_s_padw,&__pyx_n_s_padh,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_w,&__pyx_n_s_h,&__pyx_n_s_padw,&__pyx_n_s_padh,0}; - #endif - PyObject* values[5] = {0,0,0,0,0}; - values[1] = ((PyObject *)((PyObject *)__pyx_int_640)); - values[2] = ((PyObject *)((PyObject *)__pyx_int_640)); - values[3] = ((PyObject *)((PyObject *)__pyx_int_0)); - values[4] = ((PyObject *)((PyObject *)__pyx_int_0)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_x)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 285, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_w); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 285, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_h); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 285, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_padw); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 285, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_padh); - if (value) { values[4] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 285, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "xyn2xy") < 0)) __PYX_ERR(0, 285, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 5: values[4] = 
__Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_x = values[0]; - __pyx_v_w = values[1]; - __pyx_v_h = values[2]; - __pyx_v_padw = values[3]; - __pyx_v_padh = values[4]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("xyn2xy", 0, 1, 5, __pyx_nargs); __PYX_ERR(0, 285, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xyn2xy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_54xyn2xy(__pyx_self, __pyx_v_x, __pyx_v_w, __pyx_v_h, __pyx_v_padw, __pyx_v_padh); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_54xyn2xy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x, PyObject *__pyx_v_w, PyObject *__pyx_v_h, PyObject *__pyx_v_padw, PyObject *__pyx_v_padh) { - PyObject *__pyx_v_y = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("xyn2xy", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":287 - * def xyn2xy(x, w=640, h=640, padw=0, padh=0): - * # Convert normalized segments into pixel segments, shape (n,2) - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) # <<<<<<<<<<<<<< - * y[:, 0] = w * x[:, 0] + padw # top left x - * y[:, 1] = h * x[:, 1] + padh # top left y - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_torch); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_Tensor); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = PyObject_IsInstance(__pyx_v_x, __pyx_t_3); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if ((__pyx_t_4 != 0)) { - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_clone); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_5, }; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_6, 0+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } else { - 
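/* Editor's note, not Cython output: this else-branch is the `np.copy(x)` side
 * of `y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)`; copying
 * keeps the caller's array untouched. Illustrative Python-level call:
 *     xyn2xy(np.array([[0.25, 0.5]]), w=640, h=640)   # -> [[160., 320.]]
 */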
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_copy); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_x}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 1+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_1 = __pyx_t_3; - __pyx_t_3 = 0; - } - __pyx_v_y = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":288 - * # Convert normalized segments into pixel segments, shape (n,2) - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = w * x[:, 0] + padw # top left x # <<<<<<<<<<<<<< - * y[:, 1] = h * x[:, 1] + padh # top left y - * return y - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__24); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Multiply(__pyx_v_w, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_v_padw); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__24, __pyx_t_1) < 0))) __PYX_ERR(0, 288, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":289 - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = w * x[:, 0] + padw # top left x - * y[:, 1] = h * x[:, 1] + padh # top left y # <<<<<<<<<<<<<< - * return y - * - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__26); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 289, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PyNumber_Multiply(__pyx_v_h, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 289, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_v_padh); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 289, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_y, __pyx_tuple__26, __pyx_t_1) < 0))) __PYX_ERR(0, 289, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":290 - * y[:, 0] = w * x[:, 0] + padw # top left x - * y[:, 1] = h * x[:, 1] + padh # top left y - * return y # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_y); - __pyx_r = __pyx_v_y; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":285 - * - * - * def xyn2xy(x, w=640, h=640, padw=0, padh=0): # <<<<<<<<<<<<<< - * # Convert normalized segments into pixel segments, shape (n,2) - * y = x.clone() if isinstance(x, 
torch.Tensor) else np.copy(x) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.xyn2xy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_y); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":293 - * - * - * def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # <<<<<<<<<<<<<< - * # Rescale coords (xyxy) from img1_shape to img0_shape - * if ratio_pad is None: # calculate from img0_shape - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_57scale_coords(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_57scale_coords = {"scale_coords", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_57scale_coords, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_57scale_coords(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_img1_shape = 0; - PyObject *__pyx_v_coords = 0; - PyObject *__pyx_v_img0_shape = 0; - PyObject *__pyx_v_ratio_pad = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("scale_coords (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_img1_shape,&__pyx_n_s_coords,&__pyx_n_s_img0_shape,&__pyx_n_s_ratio_pad,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_img1_shape,&__pyx_n_s_coords,&__pyx_n_s_img0_shape,&__pyx_n_s_ratio_pad,0}; - #endif - PyObject* values[4] = {0,0,0,0}; - values[3] = ((PyObject *)((PyObject *)Py_None)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_img1_shape)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 293, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_coords)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 293, __pyx_L3_error) - else { - 
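/* Editor's note, not Cython output: `coords` and `img0_shape` are required
 * arguments, so a missing keyword falls through to __Pyx_RaiseArgtupleInvalid
 * below. Python-level semantics of scale_coords, with illustrative shapes:
 * for img1_shape=(640, 640) (padded inference size) and img0_shape=(480, 640)
 * (original image),
 *     gain = min(640/480, 640/640) = 1.0
 *     pad  = ((640 - 640*1.0)/2, (640 - 480*1.0)/2) = (0.0, 80.0)
 * so boxes are shifted by the 80 px of vertical padding, divided by gain,
 * and finally clipped to img0_shape via clip_coords.
 */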
__Pyx_RaiseArgtupleInvalid("scale_coords", 0, 3, 4, 1); __PYX_ERR(0, 293, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_img0_shape)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 293, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("scale_coords", 0, 3, 4, 2); __PYX_ERR(0, 293, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_ratio_pad); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 293, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "scale_coords") < 0)) __PYX_ERR(0, 293, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_img1_shape = values[0]; - __pyx_v_coords = values[1]; - __pyx_v_img0_shape = values[2]; - __pyx_v_ratio_pad = values[3]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("scale_coords", 0, 3, 4, __pyx_nargs); __PYX_ERR(0, 293, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.scale_coords", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_56scale_coords(__pyx_self, __pyx_v_img1_shape, __pyx_v_coords, __pyx_v_img0_shape, __pyx_v_ratio_pad); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_56scale_coords(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_img1_shape, PyObject *__pyx_v_coords, PyObject *__pyx_v_img0_shape, PyObject *__pyx_v_ratio_pad) { - PyObject *__pyx_v_gain = NULL; - PyObject *__pyx_v_pad = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("scale_coords", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":295 - * def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): - * # Rescale coords (xyxy) from img1_shape to img0_shape - * if ratio_pad is None: # calculate from img0_shape # <<<<<<<<<<<<<< - * gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - * pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - */ - __pyx_t_1 = (__pyx_v_ratio_pad == Py_None); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":296 - * # Rescale coords (xyxy) from img1_shape to img0_shape - * if ratio_pad is None: # calculate from img0_shape - * gain = min(img1_shape[0] / 
img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new # <<<<<<<<<<<<<< - * pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - * else: - */ - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_img1_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 296, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_img0_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 296, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 296, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_img1_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 296, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_img0_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 296, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 296, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = PyObject_RichCompare(__pyx_t_5, __pyx_t_6, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 296, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 296, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (__pyx_t_2) { - __Pyx_INCREF(__pyx_t_5); - __pyx_t_3 = __pyx_t_5; - } else { - __Pyx_INCREF(__pyx_t_6); - __pyx_t_3 = __pyx_t_6; - } - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __pyx_t_3; - __Pyx_INCREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_gain = __pyx_t_5; - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":297 - * if ratio_pad is None: # calculate from img0_shape - * gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - * pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding # <<<<<<<<<<<<<< - * else: - * gain = ratio_pad[0][0] - */ - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_img1_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_img0_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = PyNumber_Multiply(__pyx_t_3, __pyx_v_gain); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Subtract(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_3, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_img1_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_img0_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = PyNumber_Multiply(__pyx_t_5, __pyx_v_gain); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyNumber_Subtract(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_5, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 297, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); - __pyx_t_6 = 0; - __pyx_t_4 = 0; - __pyx_v_pad = __pyx_t_5; - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":295 - * def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): - * # Rescale coords (xyxy) from img1_shape to img0_shape - * if ratio_pad is None: # calculate from img0_shape # <<<<<<<<<<<<<< - * gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - * pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - */ - goto __pyx_L3; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":299 - * pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - * else: - * gain = ratio_pad[0][0] # <<<<<<<<<<<<<< - * pad = ratio_pad[1] - * - */ - /*else*/ { - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_ratio_pad, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 299, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_5, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 299, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_gain = __pyx_t_4; - __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":300 - * else: - * gain = ratio_pad[0][0] - * pad = ratio_pad[1] # <<<<<<<<<<<<<< - * - * coords[:, [0, 2]] -= pad[0] # x padding - */ - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_ratio_pad, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 300, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_v_pad = __pyx_t_4; - __pyx_t_4 = 0; - } - __pyx_L3:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":302 - * pad = ratio_pad[1] - * - * coords[:, [0, 2]] -= pad[0] # x padding # <<<<<<<<<<<<<< - * coords[:, [1, 3]] -= pad[1] # y padding - * coords[:, :4] /= gain - */ - __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 302, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyList_SET_ITEM(__pyx_t_4, 0, __pyx_int_0); - __Pyx_INCREF(__pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - PyList_SET_ITEM(__pyx_t_4, 1, __pyx_int_2); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 302, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_slice__23); - 
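/* Editor's note, not Cython output: __pyx_slice__23 is the cached slice(None)
 * object (a bare `:`); combined with the [0, 2] list built above, it forms the
 * index tuple for `coords[:, [0, 2]] -= pad[0]`, the x-padding removal step. */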
__Pyx_GIVEREF(__pyx_slice__23); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_slice__23); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_v_coords, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 302, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_pad, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 302, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_3 = PyNumber_InPlaceSubtract(__pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 302, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_coords, __pyx_t_5, __pyx_t_3) < 0))) __PYX_ERR(0, 302, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":303 - * - * coords[:, [0, 2]] -= pad[0] # x padding - * coords[:, [1, 3]] -= pad[1] # y padding # <<<<<<<<<<<<<< - * coords[:, :4] /= gain - * clip_coords(coords, img0_shape) - */ - __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 303, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_int_1); - __Pyx_GIVEREF(__pyx_int_1); - PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_1); - __Pyx_INCREF(__pyx_int_3); - __Pyx_GIVEREF(__pyx_int_3); - PyList_SET_ITEM(__pyx_t_5, 1, __pyx_int_3); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 303, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_slice__23); - __Pyx_GIVEREF(__pyx_slice__23); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_slice__23); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_coords, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 303, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_pad, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 303, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_4 = PyNumber_InPlaceSubtract(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 303, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_coords, __pyx_t_3, __pyx_t_4) < 0))) __PYX_ERR(0, 303, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":304 - * coords[:, [0, 2]] -= pad[0] # x padding - * coords[:, [1, 3]] -= pad[1] # y padding - * coords[:, :4] /= gain # <<<<<<<<<<<<<< - * clip_coords(coords, img0_shape) - * return coords - */ - __Pyx_INCREF(__pyx_tuple__29); - __pyx_t_7 = __pyx_tuple__29; - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_coords, __pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 304, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyNumber_InPlaceDivide(__pyx_t_3, __pyx_v_gain); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 304, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_coords, __pyx_t_7, __pyx_t_4) < 0))) __PYX_ERR(0, 304, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":305 - * coords[:, [1, 3]] -= pad[1] # y padding - * coords[:, :4] /= gain 
- * clip_coords(coords, img0_shape) # <<<<<<<<<<<<<< - * return coords - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_clip_coords); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 305, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_8 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_v_coords, __pyx_v_img0_shape}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_8, 2+__pyx_t_8); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 305, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":306 - * coords[:, :4] /= gain - * clip_coords(coords, img0_shape) - * return coords # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_coords); - __pyx_r = __pyx_v_coords; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":293 - * - * - * def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # <<<<<<<<<<<<<< - * # Rescale coords (xyxy) from img1_shape to img0_shape - * if ratio_pad is None: # calculate from img0_shape - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.scale_coords", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_gain); - __Pyx_XDECREF(__pyx_v_pad); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":309 - * - * - * def clip_coords(boxes, shape): # <<<<<<<<<<<<<< - * # Clip bounding xyxy bounding boxes to image shape (height, width) - * if isinstance(boxes, torch.Tensor): # faster individually - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_59clip_coords(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_59clip_coords = {"clip_coords", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_59clip_coords, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_59clip_coords(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_boxes = 0; - PyObject *__pyx_v_shape = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - 
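/* Editor's note, not Cython output: per the embedded source, clip_coords
 * clips xyxy boxes in place to shape = (height, width), the Tensor branch
 * clamping each column individually (x columns to shape[1], y columns to
 * shape[0]). Illustrative Python-level call:
 *     boxes = torch.tensor([[-5.0, 10.0, 700.0, 470.0]])
 *     clip_coords(boxes, (480, 640))   # boxes -> [[0., 10., 640., 470.]]
 */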
__Pyx_RefNannySetupContext("clip_coords (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_boxes,&__pyx_n_s_shape,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_boxes,&__pyx_n_s_shape,0}; - #endif - PyObject* values[2] = {0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_boxes)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 309, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_shape)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 309, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("clip_coords", 1, 2, 2, 1); __PYX_ERR(0, 309, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "clip_coords") < 0)) __PYX_ERR(0, 309, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_boxes = values[0]; - __pyx_v_shape = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("clip_coords", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 309, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.clip_coords", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_58clip_coords(__pyx_self, __pyx_v_boxes, __pyx_v_shape); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_58clip_coords(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_boxes, PyObject *__pyx_v_shape) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("clip_coords", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":311 - * def clip_coords(boxes, shape): - * # Clip bounding xyxy bounding boxes to image shape (height, width) - * if isinstance(boxes, torch.Tensor): # faster individually # <<<<<<<<<<<<<< - * boxes[:, 0].clamp_(0, shape[1]) # x1 - * boxes[:, 1].clamp_(0, shape[0]) # y1 - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_torch); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 311, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_Tensor); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 311, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 
= PyObject_IsInstance(__pyx_v_boxes, __pyx_t_2); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 311, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = (__pyx_t_3 != 0); - if (__pyx_t_4) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":312 - * # Clip bounding xyxy bounding boxes to image shape (height, width) - * if isinstance(boxes, torch.Tensor): # faster individually - * boxes[:, 0].clamp_(0, shape[1]) # x1 # <<<<<<<<<<<<<< - * boxes[:, 1].clamp_(0, shape[0]) # y1 - * boxes[:, 2].clamp_(0, shape[1]) # x2 - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_tuple__24); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 312, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_clamp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 312, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 312, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_int_0, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 312, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":313 - * if isinstance(boxes, torch.Tensor): # faster individually - * boxes[:, 0].clamp_(0, shape[1]) # x1 - * boxes[:, 1].clamp_(0, shape[0]) # y1 # <<<<<<<<<<<<<< - * boxes[:, 2].clamp_(0, shape[1]) # x2 - * boxes[:, 3].clamp_(0, shape[0]) # y2 - */ - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_tuple__26); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 313, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_clamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 313, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 313, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_int_0, __pyx_t_5}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 313, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":314 - * boxes[:, 
0].clamp_(0, shape[1]) # x1 - * boxes[:, 1].clamp_(0, shape[0]) # y1 - * boxes[:, 2].clamp_(0, shape[1]) # x2 # <<<<<<<<<<<<<< - * boxes[:, 3].clamp_(0, shape[0]) # y2 - * else: # np.array (faster grouped) - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_tuple__25); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 314, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_clamp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 314, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 314, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_int_0, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 314, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":315 - * boxes[:, 1].clamp_(0, shape[0]) # y1 - * boxes[:, 2].clamp_(0, shape[1]) # x2 - * boxes[:, 3].clamp_(0, shape[0]) # y2 # <<<<<<<<<<<<<< - * else: # np.array (faster grouped) - * boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - */ - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_tuple__27); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 315, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_clamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 315, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 315, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_int_0, __pyx_t_5}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 315, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":311 - * def clip_coords(boxes, shape): - * # Clip bounding xyxy bounding boxes to image shape (height, width) - * if isinstance(boxes, torch.Tensor): # faster individually # <<<<<<<<<<<<<< - * boxes[:, 0].clamp_(0, shape[1]) # x1 - * boxes[:, 1].clamp_(0, shape[0]) # y1 - */ - goto __pyx_L3; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":317 - * 
boxes[:, 3].clamp_(0, shape[0]) # y2 - * else: # np.array (faster grouped) - * boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 # <<<<<<<<<<<<<< - * boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 - * - */ - /*else*/ { - __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyList_SET_ITEM(__pyx_t_1, 0, __pyx_int_0); - __Pyx_INCREF(__pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - PyList_SET_ITEM(__pyx_t_1, 1, __pyx_int_2); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_slice__23); - __Pyx_GIVEREF(__pyx_slice__23); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_slice__23); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_clip); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_int_0, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_0); - __Pyx_INCREF(__pyx_int_2); - __Pyx_GIVEREF(__pyx_int_2); - PyList_SET_ITEM(__pyx_t_5, 1, __pyx_int_2); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_slice__23); - __Pyx_GIVEREF(__pyx_slice__23); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_slice__23); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5); - __pyx_t_5 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_2) < 0))) __PYX_ERR(0, 317, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":318 - * else: # np.array (faster grouped) - * boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - * boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_int_1); - __Pyx_GIVEREF(__pyx_int_1); - PyList_SET_ITEM(__pyx_t_1, 0, __pyx_int_1); - 
__Pyx_INCREF(__pyx_int_3); - __Pyx_GIVEREF(__pyx_int_3); - PyList_SET_ITEM(__pyx_t_1, 1, __pyx_int_3); - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_slice__23); - __Pyx_GIVEREF(__pyx_slice__23); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_slice__23); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_clip); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_6 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_int_0, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_5 = PyList_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_int_1); - __Pyx_GIVEREF(__pyx_int_1); - PyList_SET_ITEM(__pyx_t_5, 0, __pyx_int_1); - __Pyx_INCREF(__pyx_int_3); - __Pyx_GIVEREF(__pyx_int_3); - PyList_SET_ITEM(__pyx_t_5, 1, __pyx_int_3); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_slice__23); - __Pyx_GIVEREF(__pyx_slice__23); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_slice__23); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5); - __pyx_t_5 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_boxes, __pyx_t_1, __pyx_t_2) < 0))) __PYX_ERR(0, 318, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L3:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":309 - * - * - * def clip_coords(boxes, shape): # <<<<<<<<<<<<<< - * # Clip bounding xyxy bounding boxes to image shape (height, width) - * if isinstance(boxes, torch.Tensor): # faster individually - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.clip_coords", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":321 - * - * - * def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, # <<<<<<<<<<<<<< - * labels=(), max_det=300): - * """Runs 
Non-Maximum Suppression (NMS) on inference results - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_61non_max_suppression(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -PyDoc_STRVAR(__pyx_doc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_60non_max_suppression, "Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n "); -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_61non_max_suppression = {"non_max_suppression", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_61non_max_suppression, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_60non_max_suppression}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_61non_max_suppression(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_prediction = 0; - PyObject *__pyx_v_conf_thres = 0; - PyObject *__pyx_v_iou_thres = 0; - PyObject *__pyx_v_classes = 0; - PyObject *__pyx_v_agnostic = 0; - PyObject *__pyx_v_multi_label = 0; - PyObject *__pyx_v_labels = 0; - PyObject *__pyx_v_max_det = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("non_max_suppression (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_prediction,&__pyx_n_s_conf_thres,&__pyx_n_s_iou_thres,&__pyx_n_s_classes,&__pyx_n_s_agnostic,&__pyx_n_s_multi_label,&__pyx_n_s_labels,&__pyx_n_s_max_det,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_prediction,&__pyx_n_s_conf_thres,&__pyx_n_s_iou_thres,&__pyx_n_s_classes,&__pyx_n_s_agnostic,&__pyx_n_s_multi_label,&__pyx_n_s_labels,&__pyx_n_s_max_det,0}; - #endif - PyObject* values[8] = {0,0,0,0,0,0,0,0}; - values[1] = ((PyObject *)((PyObject*)__pyx_float_0_25)); - values[2] = ((PyObject *)((PyObject*)__pyx_float_0_45)); - values[3] = ((PyObject *)((PyObject *)Py_None)); - values[4] = ((PyObject *)((PyObject *)Py_False)); - values[5] = ((PyObject *)((PyObject *)Py_False)); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":322 - * - * def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, - * labels=(), max_det=300): # <<<<<<<<<<<<<< - * """Runs Non-Maximum Suppression (NMS) on inference results - * - */ - values[6] = ((PyObject *)((PyObject*)__pyx_empty_tuple)); - values[7] = ((PyObject *)((PyObject *)__pyx_int_300)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 8: values[7] = __Pyx_Arg_FASTCALL(__pyx_args, 7); - CYTHON_FALLTHROUGH; - case 7: values[6] = __Pyx_Arg_FASTCALL(__pyx_args, 6); - CYTHON_FALLTHROUGH; - case 6: values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); - CYTHON_FALLTHROUGH; - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; 
- case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_prediction)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_conf_thres); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_iou_thres); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_classes); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_agnostic); - if (value) { values[4] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 5: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_multi_label); - if (value) { values[5] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 6: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_labels); - if (value) { values[6] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 7: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_max_det); - if (value) { values[7] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 321, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "non_max_suppression") < 0)) __PYX_ERR(0, 321, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 8: values[7] = __Pyx_Arg_FASTCALL(__pyx_args, 7); - CYTHON_FALLTHROUGH; - case 7: values[6] = __Pyx_Arg_FASTCALL(__pyx_args, 6); - CYTHON_FALLTHROUGH; - case 6: values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); - CYTHON_FALLTHROUGH; - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto 
__pyx_L5_argtuple_error; - } - } - __pyx_v_prediction = values[0]; - __pyx_v_conf_thres = values[1]; - __pyx_v_iou_thres = values[2]; - __pyx_v_classes = values[3]; - __pyx_v_agnostic = values[4]; - __pyx_v_multi_label = values[5]; - __pyx_v_labels = values[6]; - __pyx_v_max_det = values[7]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("non_max_suppression", 0, 1, 8, __pyx_nargs); __PYX_ERR(0, 321, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.non_max_suppression", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_60non_max_suppression(__pyx_self, __pyx_v_prediction, __pyx_v_conf_thres, __pyx_v_iou_thres, __pyx_v_classes, __pyx_v_agnostic, __pyx_v_multi_label, __pyx_v_labels, __pyx_v_max_det); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":321 - * - * - * def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, # <<<<<<<<<<<<<< - * labels=(), max_det=300): - * """Runs Non-Maximum Suppression (NMS) on inference results - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_60non_max_suppression(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_prediction, PyObject *__pyx_v_conf_thres, PyObject *__pyx_v_iou_thres, PyObject *__pyx_v_classes, PyObject *__pyx_v_agnostic, PyObject *__pyx_v_multi_label, PyObject *__pyx_v_labels, PyObject *__pyx_v_max_det) { - PyObject *__pyx_v_nc = NULL; - PyObject *__pyx_v_xc = NULL; - CYTHON_UNUSED long __pyx_v_min_wh; - long __pyx_v_max_wh; - long __pyx_v_max_nms; - double __pyx_v_time_limit; - int __pyx_v_redundant; - int __pyx_v_merge; - PyObject *__pyx_v_t = NULL; - PyObject *__pyx_v_output = NULL; - PyObject *__pyx_v_xi = NULL; - PyObject *__pyx_v_x = NULL; - PyObject *__pyx_v_l = NULL; - PyObject *__pyx_v_v = NULL; - PyObject *__pyx_v_box = NULL; - PyObject *__pyx_v_i = NULL; - PyObject *__pyx_v_j = NULL; - PyObject *__pyx_v_conf = NULL; - PyObject *__pyx_v_n = NULL; - PyObject *__pyx_v_c = NULL; - PyObject *__pyx_v_boxes = NULL; - PyObject *__pyx_v_scores = NULL; - PyObject *__pyx_v_iou = NULL; - PyObject *__pyx_v_weights = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - Py_ssize_t __pyx_t_4; - Py_UCS4 __pyx_t_5; - long __pyx_t_6; - long __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - PyObject *(*__pyx_t_10)(PyObject *); - PyObject *__pyx_t_11 = NULL; - int __pyx_t_12; - Py_ssize_t __pyx_t_13; - PyObject *__pyx_t_14 = NULL; - PyObject *__pyx_t_15 = NULL; - PyObject *__pyx_t_16 = NULL; - PyObject *(*__pyx_t_17)(PyObject *); - PyObject *__pyx_t_18 = NULL; - PyObject *__pyx_t_19 = NULL; - PyObject *__pyx_t_20 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("non_max_suppression", 0); - __Pyx_INCREF(__pyx_v_multi_label); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":329 - * """ - * - * nc = prediction.shape[2] - 5 # number of classes # <<<<<<<<<<<<<< - * xc = prediction[..., 4] > conf_thres # candidates - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_prediction, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 329, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 329, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyInt_SubtractObjC(__pyx_t_2, __pyx_int_5, 5, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 329, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_nc = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":330 - * - * nc = prediction.shape[2] - 5 # number of classes - * xc = prediction[..., 4] > conf_thres # candidates # <<<<<<<<<<<<<< - * - * # Checks - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_prediction, __pyx_tuple__30); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 330, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_v_conf_thres, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 330, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_xc = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":333 - * - * # Checks - * assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' # <<<<<<<<<<<<<< - * assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' - * - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(!Py_OptimizeFlag)) { - __pyx_t_2 = PyObject_RichCompare(__pyx_int_0, __pyx_v_conf_thres, Py_LE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 333, __pyx_L1_error) - if (__Pyx_PyObject_IsTrue(__pyx_t_2)) { - __Pyx_DECREF(__pyx_t_2); - __pyx_t_2 = PyObject_RichCompare(__pyx_v_conf_thres, __pyx_int_1, Py_LE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 333, __pyx_L1_error) - } - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 333, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_3)) { - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 333, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = 0; - __pyx_t_5 = 127; - __Pyx_INCREF(__pyx_kp_u_Invalid_Confidence_threshold); - __pyx_t_4 += 29; - __Pyx_GIVEREF(__pyx_kp_u_Invalid_Confidence_threshold); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_kp_u_Invalid_Confidence_threshold); - __pyx_t_1 = __Pyx_PyObject_FormatSimple(__pyx_v_conf_thres, __pyx_empty_unicode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 333, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) > __pyx_t_5) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_1) : __pyx_t_5; - __pyx_t_4 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1); - __pyx_t_1 = 0; - __Pyx_INCREF(__pyx_kp_u_valid_values_are_between_0_0_an); - __pyx_t_4 += 38; - __Pyx_GIVEREF(__pyx_kp_u_valid_values_are_between_0_0_an); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_kp_u_valid_values_are_between_0_0_an); - __pyx_t_1 = __Pyx_PyUnicode_Join(__pyx_t_2, 3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 333, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_Raise(__pyx_builtin_AssertionError, __pyx_t_1, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(0, 333, __pyx_L1_error) - } - } - #else - if ((1)); else __PYX_ERR(0, 333, __pyx_L1_error) - #endif - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":334 - * # Checks - * assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - * assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' # <<<<<<<<<<<<<< - * - * # Settings - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(!Py_OptimizeFlag)) { - __pyx_t_1 = PyObject_RichCompare(__pyx_int_0, __pyx_v_iou_thres, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 334, __pyx_L1_error) - if (__Pyx_PyObject_IsTrue(__pyx_t_1)) { - __Pyx_DECREF(__pyx_t_1); - __pyx_t_1 = PyObject_RichCompare(__pyx_v_iou_thres, __pyx_int_1, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 334, __pyx_L1_error) - } - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 334, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_3)) { - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 334, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = 0; - __pyx_t_5 = 127; - __Pyx_INCREF(__pyx_kp_u_Invalid_IoU); - __pyx_t_4 += 12; - __Pyx_GIVEREF(__pyx_kp_u_Invalid_IoU); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_kp_u_Invalid_IoU); - __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_v_iou_thres, __pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 334, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) > __pyx_t_5) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) : __pyx_t_5; - __pyx_t_4 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); - __pyx_t_2 = 0; - __Pyx_INCREF(__pyx_kp_u_valid_values_are_between_0_0_an); - __pyx_t_4 += 38; - __Pyx_GIVEREF(__pyx_kp_u_valid_values_are_between_0_0_an); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_kp_u_valid_values_are_between_0_0_an); - __pyx_t_2 = __Pyx_PyUnicode_Join(__pyx_t_1, 3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 334, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_Raise(__pyx_builtin_AssertionError, __pyx_t_2, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(0, 334, __pyx_L1_error) - } - } - #else - if ((1)); else __PYX_ERR(0, 334, __pyx_L1_error) - #endif - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":337 - * - * # Settings - * min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height # <<<<<<<<<<<<<< - * max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - * time_limit = 10.0 # seconds to quit after - */ - __pyx_t_6 = 2; - __pyx_t_7 = 0x1E00; - __pyx_v_min_wh = __pyx_t_6; - __pyx_v_max_wh = __pyx_t_7; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":338 - * # Settings - * min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height - * max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() # <<<<<<<<<<<<<< - * time_limit = 10.0 # seconds to quit after - * redundant = True # require redundant detections - */ - __pyx_v_max_nms = 0x7530; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":339 - * min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height - * max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - * time_limit = 10.0 # seconds to quit after # <<<<<<<<<<<<<< - * redundant = True # require redundant detections - * multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) - */ - __pyx_v_time_limit = 10.0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":340 - * max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - * time_limit = 10.0 # seconds to quit after - * redundant = True # require redundant detections # <<<<<<<<<<<<<< - * multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) - * merge = False # use merge-NMS - */ - __pyx_v_redundant = 1; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":341 - * time_limit = 10.0 # seconds to quit after - * redundant = True # require redundant detections - * multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) # <<<<<<<<<<<<<< - * merge = False # use merge-NMS - * - */ - __pyx_t_2 = PyObject_RichCompare(__pyx_v_nc, __pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 341, __pyx_L1_error) - __pyx_t_1 = PyNumber_InPlaceAnd(__pyx_v_multi_label, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 341, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_multi_label, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":342 - * redundant = True # require redundant detections - * multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) - * merge = False # use merge-NMS # <<<<<<<<<<<<<< - * - * t = time.time() - */ - __pyx_v_merge = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":344 - * merge = False # use merge-NMS - * - * t = time.time() # <<<<<<<<<<<<<< - * output = 
[torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] - * for xi, x in enumerate(prediction): # image index, image inference - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_time); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 344, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_time); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 344, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_2, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_9, 0+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 344, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __pyx_v_t = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":345 - * - * t = time.time() - * output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] # <<<<<<<<<<<<<< - * for xi, x in enumerate(prediction): # image index, image inference - * # Apply constraints - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_torch); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_prediction, __pyx_n_s_device); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_device, __pyx_t_2) < 0) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_8, __pyx_tuple__32, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_prediction, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyList_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - { PyObject* __pyx_temp = PyNumber_InPlaceMultiply(__pyx_t_1, __pyx_t_8); if (unlikely(!__pyx_temp)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_temp); - __Pyx_DECREF(__pyx_t_1); - __pyx_t_1 = __pyx_temp; - } - __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_v_output = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":346 - * t = time.time() - * output = [torch.zeros((0, 6), device=prediction.device)] 
* prediction.shape[0] - * for xi, x in enumerate(prediction): # image index, image inference # <<<<<<<<<<<<<< - * # Apply constraints - * # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - */ - __Pyx_INCREF(__pyx_int_0); - __pyx_t_1 = __pyx_int_0; - if (likely(PyList_CheckExact(__pyx_v_prediction)) || PyTuple_CheckExact(__pyx_v_prediction)) { - __pyx_t_8 = __pyx_v_prediction; __Pyx_INCREF(__pyx_t_8); __pyx_t_4 = 0; - __pyx_t_10 = NULL; - } else { - __pyx_t_4 = -1; __pyx_t_8 = PyObject_GetIter(__pyx_v_prediction); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 346, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_10 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 346, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_10)) { - if (likely(PyList_CheckExact(__pyx_t_8))) { - if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_8)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_8, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely((0 < 0))) __PYX_ERR(0, 346, __pyx_L1_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_8, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 346, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } else { - if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_8)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_8, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely((0 < 0))) __PYX_ERR(0, 346, __pyx_L1_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_8, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 346, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } - } else { - __pyx_t_2 = __pyx_t_10(__pyx_t_8); - if (unlikely(!__pyx_t_2)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 346, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_2); - } - __Pyx_XDECREF_SET(__pyx_v_x, __pyx_t_2); - __pyx_t_2 = 0; - __Pyx_INCREF(__pyx_t_1); - __Pyx_XDECREF_SET(__pyx_v_xi, __pyx_t_1); - __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_1, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 346, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); - __pyx_t_1 = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":349 - * # Apply constraints - * # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - * x = x[xc[xi]] # confidence # <<<<<<<<<<<<<< - * - * # Cat apriori labels if autolabelling - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_xc, __pyx_v_xi); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 349, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_t_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 349, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":352 - * - * # Cat apriori labels if autolabelling - * if labels and len(labels[xi]): # <<<<<<<<<<<<<< - * l = labels[xi] - * v = torch.zeros((len(l), nc + 5), device=x.device) - */ - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_v_labels); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 352, __pyx_L1_error) - if (__pyx_t_12) { - } else { - __pyx_t_3 = __pyx_t_12; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_11 = 
__Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_v_xi); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 352, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_13 = PyObject_Length(__pyx_t_11); if (unlikely(__pyx_t_13 == ((Py_ssize_t)-1))) __PYX_ERR(0, 352, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_12 = (__pyx_t_13 != 0); - __pyx_t_3 = __pyx_t_12; - __pyx_L6_bool_binop_done:; - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":353 - * # Cat apriori labels if autolabelling - * if labels and len(labels[xi]): - * l = labels[xi] # <<<<<<<<<<<<<< - * v = torch.zeros((len(l), nc + 5), device=x.device) - * v[:, :4] = l[:, 1:5] # box - */ - __pyx_t_11 = __Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_v_xi); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 353, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_XDECREF_SET(__pyx_v_l, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":354 - * if labels and len(labels[xi]): - * l = labels[xi] - * v = torch.zeros((len(l), nc + 5), device=x.device) # <<<<<<<<<<<<<< - * v[:, :4] = l[:, 1:5] # box - * v[:, 4] = 1.0 # conf - */ - __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_n_s_torch); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_13 = PyObject_Length(__pyx_v_l); if (unlikely(__pyx_t_13 == ((Py_ssize_t)-1))) __PYX_ERR(0, 354, __pyx_L1_error) - __pyx_t_11 = PyInt_FromSsize_t(__pyx_t_13); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_14 = __Pyx_PyInt_AddObjC(__pyx_v_nc, __pyx_int_5, 5, 0, 0); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_15 = PyTuple_New(2); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_GIVEREF(__pyx_t_11); - PyTuple_SET_ITEM(__pyx_t_15, 0, __pyx_t_11); - __Pyx_GIVEREF(__pyx_t_14); - PyTuple_SET_ITEM(__pyx_t_15, 1, __pyx_t_14); - __pyx_t_11 = 0; - __pyx_t_14 = 0; - __pyx_t_14 = PyTuple_New(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_GIVEREF(__pyx_t_15); - PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_t_15); - __pyx_t_15 = 0; - __pyx_t_15 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_device); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - if (PyDict_SetItem(__pyx_t_15, __pyx_n_s_device, __pyx_t_11) < 0) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_14, __pyx_t_15); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 354, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - __Pyx_XDECREF_SET(__pyx_v_v, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":355 - * l = labels[xi] - * v = torch.zeros((len(l), nc + 5), device=x.device) - * v[:, :4] = l[:, 1:5] # box # <<<<<<<<<<<<<< - * v[:, 4] = 1.0 # conf - * v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls - */ - __pyx_t_11 = __Pyx_PyObject_GetItem(__pyx_v_l, __pyx_tuple__34); if 
(unlikely(!__pyx_t_11)) __PYX_ERR(0, 355, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - if (unlikely((PyObject_SetItem(__pyx_v_v, __pyx_tuple__29, __pyx_t_11) < 0))) __PYX_ERR(0, 355, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":356 - * v = torch.zeros((len(l), nc + 5), device=x.device) - * v[:, :4] = l[:, 1:5] # box - * v[:, 4] = 1.0 # conf # <<<<<<<<<<<<<< - * v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls - * x = torch.cat((x, v), 0) - */ - if (unlikely((PyObject_SetItem(__pyx_v_v, __pyx_tuple__35, __pyx_float_1_0) < 0))) __PYX_ERR(0, 356, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":357 - * v[:, :4] = l[:, 1:5] # box - * v[:, 4] = 1.0 # conf - * v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls # <<<<<<<<<<<<<< - * x = torch.cat((x, v), 0) - * - */ - __pyx_t_13 = PyObject_Length(__pyx_v_l); if (unlikely(__pyx_t_13 == ((Py_ssize_t)-1))) __PYX_ERR(0, 357, __pyx_L1_error) - __pyx_t_11 = PyInt_FromSsize_t(__pyx_t_13); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 357, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_15 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_11); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 357, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_v_l, __pyx_tuple__24); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 357, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_long); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 357, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_14)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_14); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_14, }; - __pyx_t_11 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_9, 0+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 357, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_11, __pyx_int_5, 5, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 357, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 357, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_GIVEREF(__pyx_t_15); - PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_15); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_2); - __pyx_t_15 = 0; - __pyx_t_2 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_v, __pyx_t_11, __pyx_float_1_0) < 0))) __PYX_ERR(0, 357, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":358 - * v[:, 4] = 1.0 # conf - * v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls - * x = torch.cat((x, v), 0) # <<<<<<<<<<<<<< - * - * # If none remain process next image - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_torch); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 358, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_cat); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 358, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); 
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 358, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_x); - __Pyx_GIVEREF(__pyx_v_x); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_x); - __Pyx_INCREF(__pyx_v_v); - __Pyx_GIVEREF(__pyx_v_v); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_v); - __pyx_t_14 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_15))) { - __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_15); - if (likely(__pyx_t_14)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_15); - __Pyx_INCREF(__pyx_t_14); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_15, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_14, __pyx_t_2, __pyx_int_0}; - __pyx_t_11 = __Pyx_PyObject_FastCall(__pyx_t_15, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 358, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - } - __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":352 - * - * # Cat apriori labels if autolabelling - * if labels and len(labels[xi]): # <<<<<<<<<<<<<< - * l = labels[xi] - * v = torch.zeros((len(l), nc + 5), device=x.device) - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":361 - * - * # If none remain process next image - * if not x.shape[0]: # <<<<<<<<<<<<<< - * continue - * - */ - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_shape); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 361, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_15 = __Pyx_GetItemInt(__pyx_t_11, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 361, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_15); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 361, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - __pyx_t_12 = ((!__pyx_t_3) != 0); - if (__pyx_t_12) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":362 - * # If none remain process next image - * if not x.shape[0]: - * continue # <<<<<<<<<<<<<< - * - * # Compute conf - */ - goto __pyx_L3_continue; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":361 - * - * # If none remain process next image - * if not x.shape[0]: # <<<<<<<<<<<<<< - * continue - * - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":365 - * - * # Compute conf - * x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf # <<<<<<<<<<<<<< - * - * # Box (center x, center y, width, height) to (x1, y1, x2, y2) - */ - __Pyx_INCREF(__pyx_tuple__37); - __pyx_t_16 = __pyx_tuple__37; - __pyx_t_15 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_t_16); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 365, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __pyx_t_11 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__39); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 365, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_2 = PyNumber_InPlaceMultiply(__pyx_t_15, __pyx_t_11); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 365, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_x, __pyx_t_16, __pyx_t_2) < 0))) __PYX_ERR(0, 365, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - 
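The remainder of this hunk deletes the generated code for non_max_suppression (general.py lines 321 onward). Reconstructed from the embedded comments, the portion compiled up to this point reads as follows; everything is quoted from the hunk except the trailing ellipsis comment, which stands in for the box-conversion and class-selection code that the hunk continues with:

    import time
    import torch

    def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None,
                            agnostic=False, multi_label=False, labels=(), max_det=300):
        """Runs Non-Maximum Suppression (NMS) on inference results

        Returns:
             list of detections, on (n,6) tensor per image [xyxy, conf, cls]
        """
        nc = prediction.shape[2] - 5  # number of classes
        xc = prediction[..., 4] > conf_thres  # candidates

        # Checks
        assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
        assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

        # Settings
        min_wh, max_wh = 2, 7680  # (pixels) minimum and maximum box width and height
        max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
        time_limit = 10.0  # seconds to quit after
        redundant = True  # require redundant detections
        multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
        merge = False  # use merge-NMS

        t = time.time()
        output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
        for xi, x in enumerate(prediction):  # image index, image inference
            x = x[xc[xi]]  # confidence

            # Cat apriori labels if autolabelling
            if labels and len(labels[xi]):
                l = labels[xi]
                v = torch.zeros((len(l), nc + 5), device=x.device)
                v[:, :4] = l[:, 1:5]  # box
                v[:, 4] = 1.0  # conf
                v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
                x = torch.cat((x, v), 0)

            # If none remain process next image
            if not x.shape[0]:
                continue

            # Compute conf
            x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
            # ... box = xywh2xyxy(x[:, :4]) and the multi_label / best-class
            # branches follow in the rest of the hunk

The long __pyx_pw_..._61non_max_suppression wrapper above is Cython's generated argument parser for exactly this signature: it preloads values[1..7] with the defaults (0.25, 0.45, None, False, False, (), 300) as module constants and then overlays positional and keyword arguments, which is why each default literal appears as a __pyx_float_/__pyx_int_ constant in the deleted C.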
__Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":368 - * - * # Box (center x, center y, width, height) to (x1, y1, x2, y2) - * box = xywh2xyxy(x[:, :4]) # <<<<<<<<<<<<<< - * - * # Detections matrix nx6 (xyxy, conf, cls) - */ - __Pyx_GetModuleGlobalName(__pyx_t_11, __pyx_n_s_xywh2xyxy); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 368, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_15 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__29); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 368, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __pyx_t_14 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) { - __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_11); - if (likely(__pyx_t_14)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); - __Pyx_INCREF(__pyx_t_14); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_11, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_14, __pyx_t_15}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_11, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 368, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_box, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":371 - * - * # Detections matrix nx6 (xyxy, conf, cls) - * if multi_label: # <<<<<<<<<<<<<< - * i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - * x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - */ - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_v_multi_label); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 371, __pyx_L1_error) - if (__pyx_t_12) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":372 - * # Detections matrix nx6 (xyxy, conf, cls) - * if multi_label: - * i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T # <<<<<<<<<<<<<< - * x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - * else: # best class only - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__37); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = PyObject_RichCompare(__pyx_t_2, __pyx_v_conf_thres, Py_GT); __Pyx_XGOTREF(__pyx_t_11); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_nonzero); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_as_tuple, Py_False) < 0) __PYX_ERR(0, 372, __pyx_L1_error) - __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_empty_tuple, __pyx_t_11); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_15, __pyx_n_s_T); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_11))) || (PyList_CheckExact(__pyx_t_11))) { - PyObject* sequence = __pyx_t_11; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size 
!= 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 372, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_15 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); - } else { - __pyx_t_15 = PyList_GET_ITEM(sequence, 0); - __pyx_t_2 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_15); - __Pyx_INCREF(__pyx_t_2); - #else - __pyx_t_15 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __pyx_t_2 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_14 = PyObject_GetIter(__pyx_t_11); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 372, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_17 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_14); - index = 0; __pyx_t_15 = __pyx_t_17(__pyx_t_14); if (unlikely(!__pyx_t_15)) goto __pyx_L10_unpacking_failed; - __Pyx_GOTREF(__pyx_t_15); - index = 1; __pyx_t_2 = __pyx_t_17(__pyx_t_14); if (unlikely(!__pyx_t_2)) goto __pyx_L10_unpacking_failed; - __Pyx_GOTREF(__pyx_t_2); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_17(__pyx_t_14), 2) < 0) __PYX_ERR(0, 372, __pyx_L1_error) - __pyx_t_17 = NULL; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - goto __pyx_L11_unpacking_done; - __pyx_L10_unpacking_failed:; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_17 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 372, __pyx_L1_error) - __pyx_L11_unpacking_done:; - } - __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_15); - __pyx_t_15 = 0; - __Pyx_XDECREF_SET(__pyx_v_j, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":373 - * if multi_label: - * i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - * x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) # <<<<<<<<<<<<<< - * else: # best class only - * conf, j = x[:, 5:].max(1, keepdim=True) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_torch); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_cat); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_box, __pyx_v_i); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_14 = __Pyx_PyInt_AddObjC(__pyx_v_j, __pyx_int_5, 5, 0, 0); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_18 = PyTuple_New(3); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_18, 0, __pyx_v_i); - __Pyx_GIVEREF(__pyx_t_14); - PyTuple_SET_ITEM(__pyx_t_18, 1, __pyx_t_14); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_18, 2, Py_None); - __pyx_t_14 = 0; - __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_t_18); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - __pyx_t_19 = __Pyx_PyObject_GetItem(__pyx_v_j, 
__pyx_tuple__40); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_19); - __pyx_t_20 = __Pyx_PyObject_GetAttrStr(__pyx_t_19, __pyx_n_s_float); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_20); - __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; - __pyx_t_19 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_20))) { - __pyx_t_19 = PyMethod_GET_SELF(__pyx_t_20); - if (likely(__pyx_t_19)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_20); - __Pyx_INCREF(__pyx_t_19); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_20, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_19, }; - __pyx_t_18 = __Pyx_PyObject_FastCall(__pyx_t_20, __pyx_callargs+1-__pyx_t_9, 0+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_19); __pyx_t_19 = 0; - if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; - } - __pyx_t_20 = PyTuple_New(3); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_20); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_20, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_14); - PyTuple_SET_ITEM(__pyx_t_20, 1, __pyx_t_14); - __Pyx_GIVEREF(__pyx_t_18); - PyTuple_SET_ITEM(__pyx_t_20, 2, __pyx_t_18); - __pyx_t_2 = 0; - __pyx_t_14 = 0; - __pyx_t_18 = 0; - __pyx_t_18 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_15))) { - __pyx_t_18 = PyMethod_GET_SELF(__pyx_t_15); - if (likely(__pyx_t_18)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_15); - __Pyx_INCREF(__pyx_t_18); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_15, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_18, __pyx_t_20, __pyx_int_1}; - __pyx_t_11 = __Pyx_PyObject_FastCall(__pyx_t_15, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_18); __pyx_t_18 = 0; - __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - } - __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":371 - * - * # Detections matrix nx6 (xyxy, conf, cls) - * if multi_label: # <<<<<<<<<<<<<< - * i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - * x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - */ - goto __pyx_L9; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":375 - * x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - * else: # best class only - * conf, j = x[:, 5:].max(1, keepdim=True) # <<<<<<<<<<<<<< - * x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] - * - */ - /*else*/ { - __pyx_t_11 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__37); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 375, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_11, __pyx_n_s_max); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 375, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 375, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_keepdim, Py_True) < 0) __PYX_ERR(0, 375, __pyx_L1_error) - __pyx_t_20 = __Pyx_PyObject_Call(__pyx_t_15, __pyx_tuple__41, __pyx_t_11); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 375, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_20); - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_20))) || (PyList_CheckExact(__pyx_t_20))) { - PyObject* sequence = __pyx_t_20; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 375, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_11 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_15 = PyTuple_GET_ITEM(sequence, 1); - } else { - __pyx_t_11 = PyList_GET_ITEM(sequence, 0); - __pyx_t_15 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_11); - __Pyx_INCREF(__pyx_t_15); - #else - __pyx_t_11 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 375, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_15 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 375, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - #endif - __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_18 = PyObject_GetIter(__pyx_t_20); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 375, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; - __pyx_t_17 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_18); - index = 0; __pyx_t_11 = __pyx_t_17(__pyx_t_18); if (unlikely(!__pyx_t_11)) goto __pyx_L12_unpacking_failed; - __Pyx_GOTREF(__pyx_t_11); - index = 1; __pyx_t_15 = __pyx_t_17(__pyx_t_18); if (unlikely(!__pyx_t_15)) goto __pyx_L12_unpacking_failed; - __Pyx_GOTREF(__pyx_t_15); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_17(__pyx_t_18), 2) < 0) __PYX_ERR(0, 375, __pyx_L1_error) - __pyx_t_17 = NULL; - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - goto __pyx_L13_unpacking_done; - __pyx_L12_unpacking_failed:; - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - __pyx_t_17 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 375, __pyx_L1_error) - __pyx_L13_unpacking_done:; - } - __Pyx_XDECREF_SET(__pyx_v_conf, __pyx_t_11); - __pyx_t_11 = 0; - __Pyx_XDECREF_SET(__pyx_v_j, __pyx_t_15); - __pyx_t_15 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":376 - * else: # best class only - * conf, j = x[:, 5:].max(1, keepdim=True) - * x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] # <<<<<<<<<<<<<< - * - * # Filter by class - */ - __Pyx_GetModuleGlobalName(__pyx_t_15, __pyx_n_s_torch); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_15, __pyx_n_s_cat); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_v_j, __pyx_n_s_float); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __pyx_t_14 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_18))) { - __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_18); - if (likely(__pyx_t_14)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_18); - __Pyx_INCREF(__pyx_t_14); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_18, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_14, }; - __pyx_t_15 = __Pyx_PyObject_FastCall(__pyx_t_18, __pyx_callargs+1-__pyx_t_9, 0+__pyx_t_9); - 
__Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - } - __pyx_t_18 = PyTuple_New(3); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __Pyx_INCREF(__pyx_v_box); - __Pyx_GIVEREF(__pyx_v_box); - PyTuple_SET_ITEM(__pyx_t_18, 0, __pyx_v_box); - __Pyx_INCREF(__pyx_v_conf); - __Pyx_GIVEREF(__pyx_v_conf); - PyTuple_SET_ITEM(__pyx_t_18, 1, __pyx_v_conf); - __Pyx_GIVEREF(__pyx_t_15); - PyTuple_SET_ITEM(__pyx_t_18, 2, __pyx_t_15); - __pyx_t_15 = 0; - __pyx_t_15 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) { - __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_11); - if (likely(__pyx_t_15)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); - __Pyx_INCREF(__pyx_t_15); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_11, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_15, __pyx_t_18, __pyx_int_1}; - __pyx_t_20 = __Pyx_PyObject_FastCall(__pyx_t_11, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_20); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - } - __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_v_conf, __pyx_n_s_view); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __pyx_t_15 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_18))) { - __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_18); - if (likely(__pyx_t_15)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_18); - __Pyx_INCREF(__pyx_t_15); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_18, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_15, __pyx_int_neg_1}; - __pyx_t_11 = __Pyx_PyObject_FastCall(__pyx_t_18, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - } - __pyx_t_18 = PyObject_RichCompare(__pyx_t_11, __pyx_v_conf_thres, Py_GT); __Pyx_XGOTREF(__pyx_t_18); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_PyObject_GetItem(__pyx_t_20, __pyx_t_18); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 376, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_11); - __pyx_t_11 = 0; - } - __pyx_L9:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":379 - * - * # Filter by class - * if classes is not None: # <<<<<<<<<<<<<< - * x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - * - */ - __pyx_t_12 = (__pyx_v_classes != Py_None); - __pyx_t_3 = (__pyx_t_12 != 0); - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":380 - * # Filter by class - * if classes is not None: - * x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] # <<<<<<<<<<<<<< - * - * # Apply finite constraint - */ - __pyx_t_18 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__43); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __Pyx_GetModuleGlobalName(__pyx_t_20, __pyx_n_s_torch); if (unlikely(!__pyx_t_20)) 
__PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_20); - __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_20, __pyx_n_s_tensor); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; - __pyx_t_20 = PyTuple_New(1); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_20); - __Pyx_INCREF(__pyx_v_classes); - __Pyx_GIVEREF(__pyx_v_classes); - PyTuple_SET_ITEM(__pyx_t_20, 0, __pyx_v_classes); - __pyx_t_14 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_device); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_14, __pyx_n_s_device, __pyx_t_2) < 0) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_15, __pyx_t_20, __pyx_t_14); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = PyObject_RichCompare(__pyx_t_18, __pyx_t_2, Py_EQ); __Pyx_XGOTREF(__pyx_t_14); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_any); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_14)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_14); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_14, __pyx_int_1}; - __pyx_t_11 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_t_11); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":379 - * - * # Filter by class - * if classes is not None: # <<<<<<<<<<<<<< - * x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - * - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":387 - * - * # Check shape - * n = x.shape[0] # number of boxes # <<<<<<<<<<<<<< - * if not n: # no boxes - * continue - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_x, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 387, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 387, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF_SET(__pyx_v_n, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":388 - * # Check shape - * n = x.shape[0] # number of boxes - * 
if not n: # no boxes # <<<<<<<<<<<<<< - * continue - * elif n > max_nms: # excess boxes - */ - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_n); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 388, __pyx_L1_error) - __pyx_t_12 = ((!__pyx_t_3) != 0); - if (__pyx_t_12) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":389 - * n = x.shape[0] # number of boxes - * if not n: # no boxes - * continue # <<<<<<<<<<<<<< - * elif n > max_nms: # excess boxes - * x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - */ - goto __pyx_L3_continue; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":388 - * # Check shape - * n = x.shape[0] # number of boxes - * if not n: # no boxes # <<<<<<<<<<<<<< - * continue - * elif n > max_nms: # excess boxes - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":390 - * if not n: # no boxes - * continue - * elif n > max_nms: # excess boxes # <<<<<<<<<<<<<< - * x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - * - */ - __pyx_t_11 = __Pyx_PyInt_From_long(__pyx_v_max_nms); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 390, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_2 = PyObject_RichCompare(__pyx_v_n, __pyx_t_11, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 390, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 390, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_12) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":391 - * continue - * elif n > max_nms: # excess boxes - * x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence # <<<<<<<<<<<<<< - * - * # Batched NMS - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__35); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_argsort); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 391, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_descending, Py_True) < 0) __PYX_ERR(0, 391, __pyx_L1_error) - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 391, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_t_14, 0, __pyx_v_max_nms, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 391, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_t_2); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 391, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_x, __pyx_t_14); - __pyx_t_14 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":390 - * if not n: # no boxes - * continue - * elif n > max_nms: # excess boxes # <<<<<<<<<<<<<< - * x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - * - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":394 - * - * # Batched NMS - * c = x[:, 5:6] * (0 if agnostic else max_wh) # classes # <<<<<<<<<<<<<< - * boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - * i = 
torchvision.ops.nms(boxes, scores, iou_thres) # NMS - */ - __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__43); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 394, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_v_agnostic); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 394, __pyx_L1_error) - if (__pyx_t_12) { - __Pyx_INCREF(__pyx_int_0); - __pyx_t_2 = __pyx_int_0; - } else { - __pyx_t_11 = __Pyx_PyInt_From_long(__pyx_v_max_wh); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 394, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_2 = __pyx_t_11; - __pyx_t_11 = 0; - } - __pyx_t_11 = PyNumber_Multiply(__pyx_t_14, __pyx_t_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 394, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF_SET(__pyx_v_c, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":395 - * # Batched NMS - * c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - * boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores # <<<<<<<<<<<<<< - * i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - * if i.shape[0] > max_det: # limit detections - */ - __pyx_t_11 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__29); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 395, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_2 = PyNumber_Add(__pyx_t_11, __pyx_v_c); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 395, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__35); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 395, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_XDECREF_SET(__pyx_v_boxes, __pyx_t_2); - __pyx_t_2 = 0; - __Pyx_XDECREF_SET(__pyx_v_scores, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":396 - * c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - * boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - * i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS # <<<<<<<<<<<<<< - * if i.shape[0] > max_det: # limit detections - * i = i[:max_det] - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_torchvision); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 396, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_ops); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 396, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_nms); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 396, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_14)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_14); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[4] = {__pyx_t_14, __pyx_v_boxes, __pyx_v_scores, __pyx_v_iou_thres}; - __pyx_t_11 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_9, 3+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 396, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_11); - __pyx_t_11 = 0; - 
- /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":397 - * boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - * i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - * if i.shape[0] > max_det: # limit detections # <<<<<<<<<<<<<< - * i = i[:max_det] - * if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - */ - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_i, __pyx_n_s_shape); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_11, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_11 = PyObject_RichCompare(__pyx_t_2, __pyx_v_max_det, Py_GT); __Pyx_XGOTREF(__pyx_t_11); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 397, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_11); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 397, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (__pyx_t_12) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":398 - * i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - * if i.shape[0] > max_det: # limit detections - * i = i[:max_det] # <<<<<<<<<<<<<< - * if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - * # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - */ - __pyx_t_11 = __Pyx_PyObject_GetSlice(__pyx_v_i, 0, 0, NULL, &__pyx_v_max_det, NULL, 0, 0, 1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 398, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF_SET(__pyx_v_i, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":397 - * boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - * i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - * if i.shape[0] > max_det: # limit detections # <<<<<<<<<<<<<< - * i = i[:max_det] - * if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":399 - * if i.shape[0] > max_det: # limit detections - * i = i[:max_det] - * if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # <<<<<<<<<<<<<< - * # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - * iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - */ - __pyx_t_3 = (__pyx_v_merge != 0); - if (__pyx_t_3) { - } else { - __pyx_t_12 = __pyx_t_3; - goto __pyx_L18_bool_binop_done; - } - __pyx_t_11 = PyObject_RichCompare(__pyx_int_1, __pyx_v_n, Py_LT); __Pyx_XGOTREF(__pyx_t_11); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 399, __pyx_L1_error) - if (__Pyx_PyObject_IsTrue(__pyx_t_11)) { - __Pyx_DECREF(__pyx_t_11); - __pyx_t_11 = PyObject_RichCompare(__pyx_v_n, __pyx_float_3E3, Py_LT); __Pyx_XGOTREF(__pyx_t_11); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 399, __pyx_L1_error) - } - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_11); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 399, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_12 = __pyx_t_3; - __pyx_L18_bool_binop_done:; - if (__pyx_t_12) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":401 - * if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - * # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - * iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix # <<<<<<<<<<<<<< - * weights = iou * scores[None] # box weights - * x[i, :4] = torch.mm(weights, x[:, :4]).float() / 
weights.sum(1, keepdim=True) # merged boxes - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_box_iou); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 401, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_v_i); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 401, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_18 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_18 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_18)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_18); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_18, __pyx_t_14, __pyx_v_boxes}; - __pyx_t_11 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_18); __pyx_t_18 = 0; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 401, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = PyObject_RichCompare(__pyx_t_11, __pyx_v_iou_thres, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 401, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_XDECREF_SET(__pyx_v_iou, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":402 - * # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - * iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - * weights = iou * scores[None] # box weights # <<<<<<<<<<<<<< - * x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - * if redundant: - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_scores, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 402, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = PyNumber_Multiply(__pyx_v_iou, __pyx_t_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 402, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF_SET(__pyx_v_weights, __pyx_t_11); - __pyx_t_11 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":403 - * iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - * weights = iou * scores[None] # box weights - * x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes # <<<<<<<<<<<<<< - * if redundant: - * i = i[iou.sum(1) > 1] # require redundancy - */ - __Pyx_GetModuleGlobalName(__pyx_t_14, __pyx_n_s_torch); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_mm); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_tuple__29); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_20 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_18))) { - __pyx_t_20 = PyMethod_GET_SELF(__pyx_t_18); - if (likely(__pyx_t_20)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_18); - __Pyx_INCREF(__pyx_t_20); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_18, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_20, __pyx_v_weights, __pyx_t_14}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_18, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_20); 
__pyx_t_20 = 0; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - } - __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_float); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_18))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_18); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_18); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_18, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_2, }; - __pyx_t_11 = __Pyx_PyObject_FastCall(__pyx_t_18, __pyx_callargs+1-__pyx_t_9, 0+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - } - __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_v_weights, __pyx_n_s_sum); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_keepdim, Py_True) < 0) __PYX_ERR(0, 403, __pyx_L1_error) - __pyx_t_14 = __Pyx_PyObject_Call(__pyx_t_18, __pyx_tuple__41, __pyx_t_2); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_11, __pyx_t_14); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = PyTuple_New(2); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - PyTuple_SET_ITEM(__pyx_t_14, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_slice__28); - __Pyx_GIVEREF(__pyx_slice__28); - PyTuple_SET_ITEM(__pyx_t_14, 1, __pyx_slice__28); - if (unlikely((PyObject_SetItem(__pyx_v_x, __pyx_t_14, __pyx_t_2) < 0))) __PYX_ERR(0, 403, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":404 - * weights = iou * scores[None] # box weights - * x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - * if redundant: # <<<<<<<<<<<<<< - * i = i[iou.sum(1) > 1] # require redundancy - * - */ - __pyx_t_12 = (__pyx_v_redundant != 0); - if (__pyx_t_12) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":405 - * x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - * if redundant: - * i = i[iou.sum(1) > 1] # require redundancy # <<<<<<<<<<<<<< - * - * output[xi] = x[i] - */ - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_v_iou, __pyx_n_s_sum); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 405, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_11 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_14))) { - __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_14); - if (likely(__pyx_t_11)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_14); - __Pyx_INCREF(__pyx_t_11); - 
__Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_14, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_int_1}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_14, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 405, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - } - __pyx_t_14 = PyObject_RichCompare(__pyx_t_2, __pyx_int_1, Py_GT); __Pyx_XGOTREF(__pyx_t_14); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 405, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_i, __pyx_t_14); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 405, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __Pyx_DECREF_SET(__pyx_v_i, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":404 - * weights = iou * scores[None] # box weights - * x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - * if redundant: # <<<<<<<<<<<<<< - * i = i[iou.sum(1) > 1] # require redundancy - * - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":399 - * if i.shape[0] > max_det: # limit detections - * i = i[:max_det] - * if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # <<<<<<<<<<<<<< - * # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - * iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":407 - * i = i[iou.sum(1) > 1] # require redundancy - * - * output[xi] = x[i] # <<<<<<<<<<<<<< - * if (time.time() - t) > time_limit: - * LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_x, __pyx_v_i); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (unlikely((PyObject_SetItem(__pyx_v_output, __pyx_v_xi, __pyx_t_2) < 0))) __PYX_ERR(0, 407, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":408 - * - * output[xi] = x[i] - * if (time.time() - t) > time_limit: # <<<<<<<<<<<<<< - * LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') - * break # time limit exceeded - */ - __Pyx_GetModuleGlobalName(__pyx_t_14, __pyx_n_s_time); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 408, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_14, __pyx_n_s_time); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 408, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __pyx_t_14 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) { - __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_11); - if (likely(__pyx_t_14)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); - __Pyx_INCREF(__pyx_t_14); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_11, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_14, }; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_11, __pyx_callargs+1-__pyx_t_9, 0+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 408, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - } - __pyx_t_11 = PyNumber_Subtract(__pyx_t_2, __pyx_v_t); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 408, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 
0; - __pyx_t_2 = PyFloat_FromDouble(__pyx_v_time_limit); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 408, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_14 = PyObject_RichCompare(__pyx_t_11, __pyx_t_2, Py_GT); __Pyx_XGOTREF(__pyx_t_14); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 408, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_14); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 408, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - if (__pyx_t_12) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":409 - * output[xi] = x[i] - * if (time.time() - t) > time_limit: - * LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') # <<<<<<<<<<<<<< - * break # time limit exceeded - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_LOGGER); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 409, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_warning); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 409, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 409, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_13 = 0; - __pyx_t_5 = 127; - __Pyx_INCREF(__pyx_kp_u_WARNING_NMS_time_limit); - __pyx_t_13 += 24; - __Pyx_GIVEREF(__pyx_kp_u_WARNING_NMS_time_limit); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_kp_u_WARNING_NMS_time_limit); - __pyx_t_18 = PyFloat_FromDouble(__pyx_v_time_limit); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 409, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_18); - __pyx_t_20 = __Pyx_PyObject_FormatSimple(__pyx_t_18, __pyx_empty_unicode); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 409, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_20); - __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; - __pyx_t_5 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_20) > __pyx_t_5) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_20) : __pyx_t_5; - __pyx_t_13 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_20); - __Pyx_GIVEREF(__pyx_t_20); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_20); - __pyx_t_20 = 0; - __Pyx_INCREF(__pyx_kp_u_s_exceeded); - __pyx_t_13 += 10; - __Pyx_GIVEREF(__pyx_kp_u_s_exceeded); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_kp_u_s_exceeded); - __pyx_t_20 = __Pyx_PyUnicode_Join(__pyx_t_2, 3, __pyx_t_13, __pyx_t_5); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 409, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_20); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_11); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_11, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_t_20}; - __pyx_t_14 = __Pyx_PyObject_FastCall(__pyx_t_11, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; - if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 409, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - } - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":410 - * if (time.time() - t) > time_limit: - * LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') - * break # time limit exceeded # <<<<<<<<<<<<<< - * - * return output - */ - goto __pyx_L4_break; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":408 - * - * output[xi] = x[i] - * if (time.time() - t) > time_limit: # <<<<<<<<<<<<<< - * LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') - * break # time limit exceeded - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":346 - * t = time.time() - * output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] - * for xi, x in enumerate(prediction): # image index, image inference # <<<<<<<<<<<<<< - * # Apply constraints - * # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - */ - __pyx_L3_continue:; - } - __pyx_L4_break:; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":412 - * break # time limit exceeded - * - * return output # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_output); - __pyx_r = __pyx_v_output; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":321 - * - * - * def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, # <<<<<<<<<<<<<< - * labels=(), max_det=300): - * """Runs Non-Maximum Suppression (NMS) on inference results - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_XDECREF(__pyx_t_14); - __Pyx_XDECREF(__pyx_t_15); - __Pyx_XDECREF(__pyx_t_16); - __Pyx_XDECREF(__pyx_t_18); - __Pyx_XDECREF(__pyx_t_19); - __Pyx_XDECREF(__pyx_t_20); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.non_max_suppression", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_nc); - __Pyx_XDECREF(__pyx_v_xc); - __Pyx_XDECREF(__pyx_v_t); - __Pyx_XDECREF(__pyx_v_output); - __Pyx_XDECREF(__pyx_v_xi); - 
__Pyx_XDECREF(__pyx_v_x); - __Pyx_XDECREF(__pyx_v_l); - __Pyx_XDECREF(__pyx_v_v); - __Pyx_XDECREF(__pyx_v_box); - __Pyx_XDECREF(__pyx_v_i); - __Pyx_XDECREF(__pyx_v_j); - __Pyx_XDECREF(__pyx_v_conf); - __Pyx_XDECREF(__pyx_v_n); - __Pyx_XDECREF(__pyx_v_c); - __Pyx_XDECREF(__pyx_v_boxes); - __Pyx_XDECREF(__pyx_v_scores); - __Pyx_XDECREF(__pyx_v_iou); - __Pyx_XDECREF(__pyx_v_weights); - __Pyx_XDECREF(__pyx_v_multi_label); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/general.py":415 - * - * - * def increment_path(path, exist_ok=False, sep='', mkdir=False): # <<<<<<<<<<<<<< - * # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. - * path = Path(path) # os-agnostic - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_63increment_path(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_63increment_path = {"increment_path", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_63increment_path, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_63increment_path(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_path = 0; - PyObject *__pyx_v_exist_ok = 0; - PyObject *__pyx_v_sep = 0; - PyObject *__pyx_v_mkdir = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("increment_path (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_path,&__pyx_n_s_exist_ok,&__pyx_n_s_sep,&__pyx_n_s_mkdir,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_path,&__pyx_n_s_exist_ok,&__pyx_n_s_sep,&__pyx_n_s_mkdir,0}; - #endif - PyObject* values[4] = {0,0,0,0}; - values[1] = ((PyObject *)((PyObject *)Py_False)); - values[2] = ((PyObject *)((PyObject*)__pyx_kp_u__11)); - values[3] = ((PyObject *)((PyObject *)Py_False)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_path)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 415, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, 
__pyx_kwvalues, __pyx_n_s_exist_ok); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 415, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_sep); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 415, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_mkdir); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 415, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "increment_path") < 0)) __PYX_ERR(0, 415, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_path = values[0]; - __pyx_v_exist_ok = values[1]; - __pyx_v_sep = values[2]; - __pyx_v_mkdir = values[3]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("increment_path", 0, 1, 4, __pyx_nargs); __PYX_ERR(0, 415, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.increment_path", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_62increment_path(__pyx_self, __pyx_v_path, __pyx_v_exist_ok, __pyx_v_sep, __pyx_v_mkdir); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_62increment_path(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_path, PyObject *__pyx_v_exist_ok, PyObject *__pyx_v_sep, PyObject *__pyx_v_mkdir) { - PyObject *__pyx_v_suffix = NULL; - PyObject *__pyx_v_dirs = NULL; - PyObject *__pyx_v_matches = NULL; - PyObject *__pyx_v_i = NULL; - PyObject *__pyx_v_n = NULL; - PyObject *__pyx_8genexpr8__pyx_v_d = NULL; - PyObject *__pyx_8genexpr9__pyx_v_m = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - Py_ssize_t __pyx_t_9; - Py_UCS4 __pyx_t_10; - PyObject *(*__pyx_t_11)(PyObject *); - PyObject *__pyx_t_12 = NULL; - Py_ssize_t __pyx_t_13; - PyObject *__pyx_t_14 = NULL; - PyObject *__pyx_t_15 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("increment_path", 0); - __Pyx_INCREF(__pyx_v_path); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":417 - * def increment_path(path, exist_ok=False, sep='', mkdir=False): - * # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
- * path = Path(path) # os-agnostic # <<<<<<<<<<<<<< - * if path.exists() and not exist_ok: - * path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Path); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 417, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_path}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 417, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF_SET(__pyx_v_path, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":418 - * # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. - * path = Path(path) # os-agnostic - * if path.exists() and not exist_ok: # <<<<<<<<<<<<<< - * path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') - * dirs = glob.glob(f"{path}{sep}*") # similar paths - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_path, __pyx_n_s_exists); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_3, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(0, 418, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_6) { - } else { - __pyx_t_5 = __pyx_t_6; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_v_exist_ok); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(0, 418, __pyx_L1_error) - __pyx_t_7 = ((!__pyx_t_6) != 0); - __pyx_t_5 = __pyx_t_7; - __pyx_L4_bool_binop_done:; - if (__pyx_t_5) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":419 - * path = Path(path) # os-agnostic - * if path.exists() and not exist_ok: - * path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') # <<<<<<<<<<<<<< - * dirs = glob.glob(f"{path}{sep}*") # similar paths - * matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_path, __pyx_n_s_is_file); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - 
__Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_8, }; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_5) { - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_path, __pyx_n_s_with_suffix); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_8, __pyx_kp_u__11}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_path, __pyx_n_s_suffix); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_3); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_1 = __pyx_t_8; - __pyx_t_8 = 0; - } else { - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_path); - __Pyx_GIVEREF(__pyx_v_path); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_path); - __Pyx_INCREF(__pyx_kp_u__11); - __Pyx_GIVEREF(__pyx_kp_u__11); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_kp_u__11); - __pyx_t_1 = __pyx_t_8; - __pyx_t_8 = 0; - } - if (likely(__pyx_t_1 != Py_None)) { - PyObject* sequence = __pyx_t_1; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 419, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_8 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(__pyx_t_3); - #else - __pyx_t_8 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 419, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - #endif - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else { - __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(0, 419, __pyx_L1_error) - } - __Pyx_DECREF_SET(__pyx_v_path, __pyx_t_8); - __pyx_t_8 = 0; - __pyx_v_suffix = __pyx_t_3; - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":420 - * if path.exists() and not exist_ok: - * path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() 
else (path, '') - * dirs = glob.glob(f"{path}{sep}*") # similar paths # <<<<<<<<<<<<<< - * matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] - * i = [int(m.groups()[0]) for m in matches if m] # indices - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_glob); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_glob); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_9 = 0; - __pyx_t_10 = 127; - __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_v_path, __pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_10 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) > __pyx_t_10) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) : __pyx_t_10; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_FormatSimple(__pyx_v_sep, __pyx_empty_unicode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_10 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) > __pyx_t_10) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_2) : __pyx_t_10; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __pyx_t_2 = 0; - __Pyx_INCREF(__pyx_kp_u__9); - __pyx_t_9 += 1; - __Pyx_GIVEREF(__pyx_kp_u__9); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_kp_u__9); - __pyx_t_2 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __pyx_v_dirs = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":421 - * path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') - * dirs = glob.glob(f"{path}{sep}*") # similar paths - * matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] # <<<<<<<<<<<<<< - * i = [int(m.groups()[0]) for m in matches if m] # indices - * n = max(i) + 1 if i else 2 # increment number - */ - { /* enter inner scope */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_1); - if (likely(PyList_CheckExact(__pyx_v_dirs)) || PyTuple_CheckExact(__pyx_v_dirs)) { - __pyx_t_8 = __pyx_v_dirs; __Pyx_INCREF(__pyx_t_8); __pyx_t_9 = 0; - __pyx_t_11 = NULL; - } else { - __pyx_t_9 = -1; __pyx_t_8 = PyObject_GetIter(__pyx_v_dirs); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 421, 
__pyx_L8_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_11 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_8); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 421, __pyx_L8_error) - } - for (;;) { - if (likely(!__pyx_t_11)) { - if (likely(PyList_CheckExact(__pyx_t_8))) { - if (__pyx_t_9 >= PyList_GET_SIZE(__pyx_t_8)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_8, __pyx_t_9); __Pyx_INCREF(__pyx_t_2); __pyx_t_9++; if (unlikely((0 < 0))) __PYX_ERR(0, 421, __pyx_L8_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_8, __pyx_t_9); __pyx_t_9++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } else { - if (__pyx_t_9 >= PyTuple_GET_SIZE(__pyx_t_8)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_8, __pyx_t_9); __Pyx_INCREF(__pyx_t_2); __pyx_t_9++; if (unlikely((0 < 0))) __PYX_ERR(0, 421, __pyx_L8_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_8, __pyx_t_9); __pyx_t_9++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } - } else { - __pyx_t_2 = __pyx_t_11(__pyx_t_8); - if (unlikely(!__pyx_t_2)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 421, __pyx_L8_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_2); - } - __Pyx_XDECREF_SET(__pyx_8genexpr8__pyx_v_d, __pyx_t_2); - __pyx_t_2 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_re); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_search); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_13 = 0; - __pyx_t_10 = 127; - __Pyx_INCREF(__pyx_kp_u_s_2); - __pyx_t_13 += 2; - __Pyx_GIVEREF(__pyx_kp_u_s_2); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_kp_u_s_2); - __pyx_t_14 = __Pyx_PyObject_FormatSimple(__pyx_v_sep, __pyx_empty_unicode); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_14); - __pyx_t_10 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_14) > __pyx_t_10) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_14) : __pyx_t_10; - __pyx_t_13 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_14); - __Pyx_GIVEREF(__pyx_t_14); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_14); - __pyx_t_14 = 0; - __Pyx_INCREF(__pyx_kp_u_d); - __pyx_t_13 += 5; - __Pyx_GIVEREF(__pyx_kp_u_d); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_kp_u_d); - __pyx_t_14 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, __pyx_t_13, __pyx_t_10); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_path, __pyx_n_s_stem); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_15 = __Pyx_PyUnicode_FormatSafe(__pyx_t_14, __pyx_t_3); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_15); - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_12))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_12); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_12); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_12, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_t_15, __pyx_8genexpr8__pyx_v_d}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_12, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - } - if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_2))) __PYX_ERR(0, 421, __pyx_L8_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_XDECREF(__pyx_8genexpr8__pyx_v_d); __pyx_8genexpr8__pyx_v_d = 0; - goto __pyx_L11_exit_scope; - __pyx_L8_error:; - __Pyx_XDECREF(__pyx_8genexpr8__pyx_v_d); __pyx_8genexpr8__pyx_v_d = 0; - goto __pyx_L1_error; - __pyx_L11_exit_scope:; - } /* exit inner scope */ - __pyx_v_matches = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":422 - * dirs = glob.glob(f"{path}{sep}*") # similar paths - * matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] - * i = [int(m.groups()[0]) for m in matches if m] # indices # <<<<<<<<<<<<<< - * n = max(i) + 1 if i else 2 # increment number - * path = Path(f"{path}{sep}{n}{suffix}") # increment path - */ - { /* enter inner scope */ - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 422, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_8 = __pyx_v_matches; __Pyx_INCREF(__pyx_t_8); __pyx_t_9 = 0; - for (;;) { - if (__pyx_t_9 >= PyList_GET_SIZE(__pyx_t_8)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_8, __pyx_t_9); __Pyx_INCREF(__pyx_t_2); __pyx_t_9++; if (unlikely((0 < 0))) __PYX_ERR(0, 422, __pyx_L14_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_8, __pyx_t_9); __pyx_t_9++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 422, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - __Pyx_XDECREF_SET(__pyx_8genexpr9__pyx_v_m, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_8genexpr9__pyx_v_m); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 422, __pyx_L14_error) - if (__pyx_t_5) { - __pyx_t_12 = 
__Pyx_PyObject_GetAttrStr(__pyx_8genexpr9__pyx_v_m, __pyx_n_s_groups); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 422, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_15 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_12))) { - __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_12); - if (likely(__pyx_t_15)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_12); - __Pyx_INCREF(__pyx_t_15); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_12, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_15, }; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_12, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 422, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - } - __pyx_t_12 = __Pyx_GetItemInt(__pyx_t_2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 422, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_t_12); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 422, __pyx_L14_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_2))) __PYX_ERR(0, 422, __pyx_L14_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - } - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_XDECREF(__pyx_8genexpr9__pyx_v_m); __pyx_8genexpr9__pyx_v_m = 0; - goto __pyx_L18_exit_scope; - __pyx_L14_error:; - __Pyx_XDECREF(__pyx_8genexpr9__pyx_v_m); __pyx_8genexpr9__pyx_v_m = 0; - goto __pyx_L1_error; - __pyx_L18_exit_scope:; - } /* exit inner scope */ - __pyx_v_i = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":423 - * matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] - * i = [int(m.groups()[0]) for m in matches if m] # indices - * n = max(i) + 1 if i else 2 # increment number # <<<<<<<<<<<<<< - * path = Path(f"{path}{sep}{n}{suffix}") # increment path - * if mkdir: - */ - __pyx_t_5 = (PyList_GET_SIZE(__pyx_v_i) != 0); - if (__pyx_t_5) { - __pyx_t_8 = __Pyx_PyObject_CallOneArg(__pyx_builtin_max, __pyx_v_i); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 423, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_2 = __Pyx_PyInt_AddObjC(__pyx_t_8, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 423, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_1 = __pyx_t_2; - __pyx_t_2 = 0; - } else { - __Pyx_INCREF(__pyx_int_2); - __pyx_t_1 = __pyx_int_2; - } - __pyx_v_n = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":424 - * i = [int(m.groups()[0]) for m in matches if m] # indices - * n = max(i) + 1 if i else 2 # increment number - * path = Path(f"{path}{sep}{n}{suffix}") # increment path # <<<<<<<<<<<<<< - * if mkdir: - * path.mkdir(parents=True, exist_ok=True) # make directory - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Path); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_8 = PyTuple_New(4); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_9 = 0; - __pyx_t_10 = 127; - __pyx_t_12 = __Pyx_PyObject_FormatSimple(__pyx_v_path, __pyx_empty_unicode); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_10 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_12) > __pyx_t_10) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_12) : __pyx_t_10; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_12); - __pyx_t_12 = 0; - __pyx_t_12 = __Pyx_PyObject_FormatSimple(__pyx_v_sep, __pyx_empty_unicode); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_10 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_12) > __pyx_t_10) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_12) : __pyx_t_10; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_12); - __pyx_t_12 = 0; - __pyx_t_12 = __Pyx_PyObject_FormatSimple(__pyx_v_n, __pyx_empty_unicode); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_10 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_12) > __pyx_t_10) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_12) : __pyx_t_10; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_12); - __pyx_t_12 = 0; - __pyx_t_12 = __Pyx_PyObject_FormatSimple(__pyx_v_suffix, __pyx_empty_unicode); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __pyx_t_10 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_12) > __pyx_t_10) ? __Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_12) : __pyx_t_10; - __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_12); - __Pyx_GIVEREF(__pyx_t_12); - PyTuple_SET_ITEM(__pyx_t_8, 3, __pyx_t_12); - __pyx_t_12 = 0; - __pyx_t_12 = __Pyx_PyUnicode_Join(__pyx_t_8, 4, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_8, __pyx_t_12}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+1-__pyx_t_4, 1+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 424, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __Pyx_DECREF_SET(__pyx_v_path, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":418 - * # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
- * path = Path(path) # os-agnostic - * if path.exists() and not exist_ok: # <<<<<<<<<<<<<< - * path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') - * dirs = glob.glob(f"{path}{sep}*") # similar paths - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":425 - * n = max(i) + 1 if i else 2 # increment number - * path = Path(f"{path}{sep}{n}{suffix}") # increment path - * if mkdir: # <<<<<<<<<<<<<< - * path.mkdir(parents=True, exist_ok=True) # make directory - * return path - */ - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_mkdir); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 425, __pyx_L1_error) - if (__pyx_t_5) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":426 - * path = Path(f"{path}{sep}{n}{suffix}") # increment path - * if mkdir: - * path.mkdir(parents=True, exist_ok=True) # make directory # <<<<<<<<<<<<<< - * return path - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_path, __pyx_n_s_mkdir); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 426, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 426, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_parents, Py_True) < 0) __PYX_ERR(0, 426, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_exist_ok, Py_True) < 0) __PYX_ERR(0, 426, __pyx_L1_error) - __pyx_t_12 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 426, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":425 - * n = max(i) + 1 if i else 2 # increment number - * path = Path(f"{path}{sep}{n}{suffix}") # increment path - * if mkdir: # <<<<<<<<<<<<<< - * path.mkdir(parents=True, exist_ok=True) # make directory - * return path - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":427 - * if mkdir: - * path.mkdir(parents=True, exist_ok=True) # make directory - * return path # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_path); - __pyx_r = __pyx_v_path; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":415 - * - * - * def increment_path(path, exist_ok=False, sep='', mkdir=False): # <<<<<<<<<<<<<< - * # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
- * path = Path(path) # os-agnostic - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_12); - __Pyx_XDECREF(__pyx_t_14); - __Pyx_XDECREF(__pyx_t_15); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.general.increment_path", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_suffix); - __Pyx_XDECREF(__pyx_v_dirs); - __Pyx_XDECREF(__pyx_v_matches); - __Pyx_XDECREF(__pyx_v_i); - __Pyx_XDECREF(__pyx_v_n); - __Pyx_XDECREF(__pyx_8genexpr8__pyx_v_d); - __Pyx_XDECREF(__pyx_8genexpr9__pyx_v_m); - __Pyx_XDECREF(__pyx_v_path); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_v_func); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *p = (struct 
__pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *)o; - if (p->__pyx_v_func) { - e = (*v)(p->__pyx_v_func, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except(PyObject *o) { - PyObject* tmp; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except *)o; - tmp = ((PyObject*)p->__pyx_v_func); - p->__pyx_v_func = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except}, - {Py_tp_clear, (void *)__pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except}, - {Py_tp_new, (void *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct__try_except", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct__try_except", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except, /*tp_traverse*/ - __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - 
__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_v_opt); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *)o; - if 
(p->__pyx_v_opt) { - e = (*v)(p->__pyx_v_opt, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args(PyObject *o) { - PyObject* tmp; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args *)o; - tmp = ((PyObject*)p->__pyx_v_opt); - p->__pyx_v_opt = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args}, - {Py_tp_clear, (void *)__pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args}, - {Py_tp_new, (void *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_1_print_args", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_1_print_args", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args, /*tp_traverse*/ - __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args, /*tp_new*/ - 0, 
/*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_outer_scope); - Py_CLEAR(p->__pyx_v_k); - Py_CLEAR(p->__pyx_v_v); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr *)o; - if (p->__pyx_outer_scope) { - e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e; - } - if 
(p->__pyx_v_k) { - e = (*v)(p->__pyx_v_k, a); if (e) return e; - } - if (p->__pyx_v_v) { - e = (*v)(p->__pyx_v_v, a); if (e) return e; - } - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr}, - {Py_tp_new, (void *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_2_genexpr", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_2_genexpr", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct 
__pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_v_exclude); - Py_CLEAR(p->__pyx_8genexpr2__pyx_v_k); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *)o; - if (p->__pyx_v_exclude) { - e = (*v)(p->__pyx_v_exclude, a); if (e) return e; - } - if (p->__pyx_8genexpr2__pyx_v_k) { - e = (*v)(p->__pyx_8genexpr2__pyx_v_k, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts(PyObject *o) { - PyObject* tmp; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts *)o; - tmp = ((PyObject*)p->__pyx_v_exclude); - 
p->__pyx_v_exclude = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->__pyx_8genexpr2__pyx_v_k); - p->__pyx_8genexpr2__pyx_v_k = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts}, - {Py_tp_clear, (void *)__pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts}, - {Py_tp_new, (void *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_3_intersect_dicts", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_3_intersect_dicts", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts, /*tp_traverse*/ - __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, 
/*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_outer_scope); - Py_CLEAR(p->__pyx_v_x); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr *)o; - if (p->__pyx_outer_scope) { - e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e; - } - if (p->__pyx_v_x) { - e = (*v)(p->__pyx_v_x, a); if (e) return e; - } - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr_slots[] = { - {Py_tp_dealloc, (void 
*)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr}, - {Py_tp_new, (void *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_4_genexpr", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_4_genexpr", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size = 0; - -static PyObject 
*__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_v_path); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *)o; - if (p->__pyx_v_path) { - e = (*v)(p->__pyx_v_path, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size(PyObject *o) { - PyObject* tmp; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size *)o; - tmp = ((PyObject*)p->__pyx_v_path); - p->__pyx_v_path = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size}, - {Py_tp_clear, (void *)__pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size}, - {Py_tp_new, (void 
*)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_5_file_size", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_5_file_size", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size, /*tp_traverse*/ - __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED 
PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_outer_scope); - Py_CLEAR(p->__pyx_v_f); - Py_CLEAR(p->__pyx_t_0); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr *)o; - if (p->__pyx_outer_scope) { - e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e; - } - if (p->__pyx_v_f) { - e = (*v)(p->__pyx_v_f, a); if (e) return e; - } - if (p->__pyx_t_0) { - e = (*v)(p->__pyx_t_0, a); if (e) return e; - } - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr}, - {Py_tp_new, (void *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_6_genexpr", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - 
__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_6_genexpr", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle)))) { - o = 
(PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_v_steps); - Py_CLEAR(p->__pyx_v_y1); - Py_CLEAR(p->__pyx_v_y2); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *)o; - if (p->__pyx_v_steps) { - e = (*v)(p->__pyx_v_steps, a); if (e) return e; - } - if (p->__pyx_v_y1) { - e = (*v)(p->__pyx_v_y1, a); if (e) return e; - } - if (p->__pyx_v_y2) { - e = (*v)(p->__pyx_v_y2, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle(PyObject *o) { - PyObject* tmp; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle *)o; - tmp = ((PyObject*)p->__pyx_v_steps); - p->__pyx_v_steps = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->__pyx_v_y1); - p->__pyx_v_y1 = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->__pyx_v_y2); - p->__pyx_v_y2 = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle}, - {Py_tp_clear, (void *)__pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle}, - {Py_tp_new, (void *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle}, - {0, 0}, -}; -static PyType_Spec 
__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_7_one_cycle", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_7_one_cycle", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle, /*tp_traverse*/ - __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, 
Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_v_args); - Py_CLEAR(p->__pyx_v_colors); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *)o; - if (p->__pyx_v_args) { - e = (*v)(p->__pyx_v_args, a); if (e) return e; - } - if (p->__pyx_v_colors) { - e = (*v)(p->__pyx_v_colors, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr(PyObject *o) { - PyObject* tmp; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr *)o; - tmp = ((PyObject*)p->__pyx_v_args); - p->__pyx_v_args = ((PyObject*)Py_None); Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->__pyx_v_colors); - p->__pyx_v_colors = ((PyObject*)Py_None); Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr}, - {Py_tp_clear, (void *)__pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr}, - {Py_tp_new, (void 
*)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_8_colorstr", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr_slots, -}; -#else - -static PyTypeObject __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_8_colorstr", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr, /*tp_traverse*/ - __pyx_tp_clear_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr[8]; -static int __pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr = 0; - -static PyObject *__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - 
PyObject *o; - #if CYTHON_COMPILING_IN_LIMITED_API - allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); - o = alloc_func(t, 0); - #else - if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr)))) { - o = (PyObject*)__pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr[--__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr]; - memset(o, 0, sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr)); - (void) PyObject_INIT(o, t); - PyObject_GC_Track(o); - } else { - o = (*t->tp_alloc)(t, 0); - if (unlikely(!o)) return 0; - } - #endif - return o; -} - -static void __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr(PyObject *o) { - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *)o; - PyObject_GC_UnTrack(o); - Py_CLEAR(p->__pyx_outer_scope); - Py_CLEAR(p->__pyx_v_x); - if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr)))) { - __pyx_freelist_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr[__pyx_freecount_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr++] = ((struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *)o); - } else { - (*Py_TYPE(o)->tp_free)(o); - } -} - -static int __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *p = (struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr *)o; - if (p->__pyx_outer_scope) { - e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e; - } - if (p->__pyx_v_x) { - e = (*v)(p->__pyx_v_x, a); if (e) return e; - } - return 0; -} -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr_slots[] = { - {Py_tp_dealloc, (void *)__pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr}, - {Py_tp_traverse, (void *)__pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr}, - {Py_tp_new, (void *)__pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr}, - {0, 0}, -}; -static PyType_Spec __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr_spec = { - "pdf_toolbox.lib.dia_yolov5.utils.general.__pyx_scope_struct_9_genexpr", - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr), - 0, - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, - __pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr_slots, -}; -#else - -static PyTypeObject 
__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr = { - PyVarObject_HEAD_INIT(0, 0) - "pdf_toolbox.lib.dia_yolov5.utils.general.""__pyx_scope_struct_9_genexpr", /*tp_name*/ - sizeof(struct __pyx_obj_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - 0, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - #if !CYTHON_USE_TYPE_SPECS - 0, /*tp_dictoffset*/ - #endif - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - #if CYTHON_USE_TP_FINALIZE - 0, /*tp_finalize*/ - #else - NULL, /*tp_finalize*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif - #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, /*tp_pypy_flags*/ - #endif -}; -#endif - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif -/* #### Code section: pystring_table ### */ - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - #if CYTHON_USE_MODULE_STATE - {0, __pyx_k_0_0_0, sizeof(__pyx_k_0_0_0), 0, 1, 0, 0}, - {0, __pyx_k_0m, sizeof(__pyx_k_0m), 0, 1, 0, 0}, - {0, __pyx_k_11_5g, sizeof(__pyx_k_11_5g), 0, 1, 0, 0}, - {0, __pyx_k_1m, sizeof(__pyx_k_1m), 0, 1, 0, 0}, - {0, __pyx_k_30m, sizeof(__pyx_k_30m), 0, 1, 0, 0}, - {0, __pyx_k_31m, sizeof(__pyx_k_31m), 0, 1, 0, 0}, - {0, __pyx_k_32m, sizeof(__pyx_k_32m), 0, 1, 0, 0}, - {0, __pyx_k_33m, sizeof(__pyx_k_33m), 0, 1, 0, 0}, - {0, __pyx_k_34m, sizeof(__pyx_k_34m), 0, 1, 0, 0}, - {0, __pyx_k_35m, sizeof(__pyx_k_35m), 0, 1, 0, 0}, - {0, __pyx_k_36m, sizeof(__pyx_k_36m), 0, 1, 0, 0}, - {0, __pyx_k_37m, sizeof(__pyx_k_37m), 0, 1, 0, 0}, - {0, __pyx_k_3_6_2, sizeof(__pyx_k_3_6_2), 0, 1, 0, 0}, - {0, __pyx_k_4m, sizeof(__pyx_k_4m), 0, 1, 0, 0}, - {0, __pyx_k_90m, sizeof(__pyx_k_90m), 0, 1, 0, 0}, - 
{0, __pyx_k_91m, sizeof(__pyx_k_91m), 0, 1, 0, 0}, - {0, __pyx_k_92m, sizeof(__pyx_k_92m), 0, 1, 0, 0}, - {0, __pyx_k_93m, sizeof(__pyx_k_93m), 0, 1, 0, 0}, - {0, __pyx_k_94m, sizeof(__pyx_k_94m), 0, 1, 0, 0}, - {0, __pyx_k_95m, sizeof(__pyx_k_95m), 0, 1, 0, 0}, - {0, __pyx_k_96m, sizeof(__pyx_k_96m), 0, 1, 0, 0}, - {0, __pyx_k_97m, sizeof(__pyx_k_97m), 0, 1, 0, 0}, - {0, __pyx_k_AppData_Roaming, sizeof(__pyx_k_AppData_Roaming), 0, 1, 0, 0}, - {0, __pyx_k_AssertionError, sizeof(__pyx_k_AssertionError), 0, 0, 1, 1}, - {0, __pyx_k_Darwin, sizeof(__pyx_k_Darwin), 0, 1, 0, 1}, - {0, __pyx_k_FILE, sizeof(__pyx_k_FILE), 0, 0, 1, 1}, - {0, __pyx_k_INFO, sizeof(__pyx_k_INFO), 0, 0, 1, 1}, - {0, __pyx_k_Invalid_Confidence_threshold, sizeof(__pyx_k_Invalid_Confidence_threshold), 0, 1, 0, 0}, - {0, __pyx_k_Invalid_IoU, sizeof(__pyx_k_Invalid_IoU), 0, 1, 0, 0}, - {0, __pyx_k_LOGGER, sizeof(__pyx_k_LOGGER), 0, 0, 1, 1}, - {0, __pyx_k_Library_Application_Support, sizeof(__pyx_k_Library_Application_Support), 0, 1, 0, 0}, - {0, __pyx_k_Linux, sizeof(__pyx_k_Linux), 0, 1, 0, 1}, - {0, __pyx_k_NCOLS, sizeof(__pyx_k_NCOLS), 0, 0, 1, 1}, - {0, __pyx_k_NUMEXPR_MAX_THREADS, sizeof(__pyx_k_NUMEXPR_MAX_THREADS), 0, 1, 0, 1}, - {0, __pyx_k_NUM_THREADS, sizeof(__pyx_k_NUM_THREADS), 0, 0, 1, 1}, - {0, __pyx_k_OSError, sizeof(__pyx_k_OSError), 0, 0, 1, 1}, - {0, __pyx_k_Path, sizeof(__pyx_k_Path), 0, 0, 1, 1}, - {0, __pyx_k_Python, sizeof(__pyx_k_Python), 0, 1, 0, 0}, - {0, __pyx_k_RANK, sizeof(__pyx_k_RANK), 0, 1, 0, 1}, - {0, __pyx_k_ROOT, sizeof(__pyx_k_ROOT), 0, 0, 1, 1}, - {0, __pyx_k_R_OK, sizeof(__pyx_k_R_OK), 0, 0, 1, 1}, - {0, __pyx_k_T, sizeof(__pyx_k_T), 0, 0, 1, 1}, - {0, __pyx_k_Tensor, sizeof(__pyx_k_Tensor), 0, 0, 1, 1}, - {0, __pyx_k_Ultralytics, sizeof(__pyx_k_Ultralytics), 0, 1, 0, 1}, - {0, __pyx_k_VERBOSE, sizeof(__pyx_k_VERBOSE), 0, 0, 1, 1}, - {0, __pyx_k_WARNING, sizeof(__pyx_k_WARNING), 0, 0, 1, 1}, - {0, __pyx_k_WARNING_NMS_time_limit, sizeof(__pyx_k_WARNING_NMS_time_limit), 0, 1, 0, 0}, - {0, __pyx_k_WARNING_img_size, sizeof(__pyx_k_WARNING_img_size), 0, 1, 0, 0}, - {0, __pyx_k_Windows, sizeof(__pyx_k_Windows), 0, 1, 0, 1}, - {0, __pyx_k_YOLOV5_CONFIG_DIR, sizeof(__pyx_k_YOLOV5_CONFIG_DIR), 0, 1, 0, 1}, - {0, __pyx_k_YOLOv5_VERBOSE, sizeof(__pyx_k_YOLOv5_VERBOSE), 0, 1, 0, 1}, - {0, __pyx_k__10, sizeof(__pyx_k__10), 0, 1, 0, 0}, - {0, __pyx_k__11, sizeof(__pyx_k__11), 0, 1, 0, 0}, - {0, __pyx_k__14, sizeof(__pyx_k__14), 0, 1, 0, 1}, - {0, __pyx_k__15, sizeof(__pyx_k__15), 0, 1, 0, 0}, - {0, __pyx_k__16, sizeof(__pyx_k__16), 0, 1, 0, 0}, - {0, __pyx_k__17, sizeof(__pyx_k__17), 0, 1, 0, 0}, - {0, __pyx_k__18, sizeof(__pyx_k__18), 0, 1, 0, 0}, - {0, __pyx_k__20, sizeof(__pyx_k__20), 0, 0, 1, 1}, - {0, __pyx_k__20, sizeof(__pyx_k__20), 0, 1, 0, 0}, - {0, __pyx_k__21, sizeof(__pyx_k__21), 0, 1, 0, 0}, - {0, __pyx_k__22, sizeof(__pyx_k__22), 0, 1, 0, 1}, - {0, __pyx_k__4, sizeof(__pyx_k__4), 0, 1, 0, 1}, - {0, __pyx_k__5, sizeof(__pyx_k__5), 0, 1, 0, 0}, - {0, __pyx_k__6, sizeof(__pyx_k__6), 0, 1, 0, 0}, - {0, __pyx_k__7, sizeof(__pyx_k__7), 0, 1, 0, 0}, - {0, __pyx_k__9, sizeof(__pyx_k__9), 0, 0, 1, 1}, - {0, __pyx_k__9, sizeof(__pyx_k__9), 0, 1, 0, 0}, - {0, __pyx_k_access, sizeof(__pyx_k_access), 0, 0, 1, 1}, - {0, __pyx_k_agnostic, sizeof(__pyx_k_agnostic), 0, 0, 1, 1}, - {0, __pyx_k_any, sizeof(__pyx_k_any), 0, 0, 1, 1}, - {0, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1}, - {0, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, - {0, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, - 
{0, __pyx_k_as_tuple, sizeof(__pyx_k_as_tuple), 0, 0, 1, 1}, - {0, __pyx_k_ascii, sizeof(__pyx_k_ascii), 0, 1, 0, 1}, - {0, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1}, - {0, __pyx_k_asyncio_coroutines, sizeof(__pyx_k_asyncio_coroutines), 0, 0, 1, 1}, - {0, __pyx_k_backends, sizeof(__pyx_k_backends), 0, 0, 1, 1}, - {0, __pyx_k_basicConfig, sizeof(__pyx_k_basicConfig), 0, 0, 1, 1}, - {0, __pyx_k_benchmark, sizeof(__pyx_k_benchmark), 0, 0, 1, 1}, - {0, __pyx_k_bincount, sizeof(__pyx_k_bincount), 0, 0, 1, 1}, - {0, __pyx_k_black, sizeof(__pyx_k_black), 0, 1, 0, 1}, - {0, __pyx_k_blue, sizeof(__pyx_k_blue), 0, 1, 0, 1}, - {0, __pyx_k_bold, sizeof(__pyx_k_bold), 0, 1, 0, 1}, - {0, __pyx_k_box, sizeof(__pyx_k_box), 0, 0, 1, 1}, - {0, __pyx_k_box_iou, sizeof(__pyx_k_box_iou), 0, 0, 1, 1}, - {0, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1}, - {0, __pyx_k_bright_black, sizeof(__pyx_k_bright_black), 0, 1, 0, 1}, - {0, __pyx_k_bright_blue, sizeof(__pyx_k_bright_blue), 0, 1, 0, 1}, - {0, __pyx_k_bright_cyan, sizeof(__pyx_k_bright_cyan), 0, 1, 0, 1}, - {0, __pyx_k_bright_green, sizeof(__pyx_k_bright_green), 0, 1, 0, 1}, - {0, __pyx_k_bright_magenta, sizeof(__pyx_k_bright_magenta), 0, 1, 0, 1}, - {0, __pyx_k_bright_red, sizeof(__pyx_k_bright_red), 0, 1, 0, 1}, - {0, __pyx_k_bright_white, sizeof(__pyx_k_bright_white), 0, 1, 0, 1}, - {0, __pyx_k_bright_yellow, sizeof(__pyx_k_bright_yellow), 0, 1, 0, 1}, - {0, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {0, __pyx_k_cat, sizeof(__pyx_k_cat), 0, 0, 1, 1}, - {0, __pyx_k_ceil, sizeof(__pyx_k_ceil), 0, 0, 1, 1}, - {0, __pyx_k_cfg, sizeof(__pyx_k_cfg), 0, 0, 1, 1}, - {0, __pyx_k_check_img_size, sizeof(__pyx_k_check_img_size), 0, 0, 1, 1}, - {0, __pyx_k_check_python, sizeof(__pyx_k_check_python), 0, 0, 1, 1}, - {0, __pyx_k_check_version, sizeof(__pyx_k_check_version), 0, 0, 1, 1}, - {0, __pyx_k_clamp, sizeof(__pyx_k_clamp), 0, 0, 1, 1}, - {0, __pyx_k_class_counts, sizeof(__pyx_k_class_counts), 0, 0, 1, 1}, - {0, __pyx_k_class_getitem, sizeof(__pyx_k_class_getitem), 0, 0, 1, 1}, - {0, __pyx_k_class_weights, sizeof(__pyx_k_class_weights), 0, 0, 1, 1}, - {0, __pyx_k_classes, sizeof(__pyx_k_classes), 0, 0, 1, 1}, - {0, __pyx_k_clean_str, sizeof(__pyx_k_clean_str), 0, 0, 1, 1}, - {0, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {0, __pyx_k_clip, sizeof(__pyx_k_clip), 0, 0, 1, 1}, - {0, __pyx_k_clip_coords, sizeof(__pyx_k_clip_coords), 0, 0, 1, 1}, - {0, __pyx_k_clone, sizeof(__pyx_k_clone), 0, 0, 1, 1}, - {0, __pyx_k_close, sizeof(__pyx_k_close), 0, 0, 1, 1}, - {0, __pyx_k_colors, sizeof(__pyx_k_colors), 0, 0, 1, 1}, - {0, __pyx_k_colorstr, sizeof(__pyx_k_colorstr), 0, 0, 1, 1}, - {0, __pyx_k_colorstr_locals_genexpr, sizeof(__pyx_k_colorstr_locals_genexpr), 0, 0, 1, 1}, - {0, __pyx_k_columns, sizeof(__pyx_k_columns), 0, 0, 1, 1}, - {0, __pyx_k_concatenate, sizeof(__pyx_k_concatenate), 0, 0, 1, 1}, - {0, __pyx_k_conf, sizeof(__pyx_k_conf), 0, 0, 1, 1}, - {0, __pyx_k_conf_thres, sizeof(__pyx_k_conf_thres), 0, 0, 1, 1}, - {0, __pyx_k_config, sizeof(__pyx_k_config), 0, 1, 0, 0}, - {0, __pyx_k_coords, sizeof(__pyx_k_coords), 0, 0, 1, 1}, - {0, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1}, - {0, __pyx_k_cos, sizeof(__pyx_k_cos), 0, 0, 1, 1}, - {0, __pyx_k_cpu_count, sizeof(__pyx_k_cpu_count), 0, 0, 1, 1}, - {0, __pyx_k_cudnn, sizeof(__pyx_k_cudnn), 0, 0, 1, 1}, - {0, __pyx_k_current, sizeof(__pyx_k_current), 0, 0, 1, 1}, - {0, __pyx_k_cv2, sizeof(__pyx_k_cv2), 0, 0, 1, 1}, - {0, __pyx_k_cyan, sizeof(__pyx_k_cyan), 0, 1, 0, 
1}, - {0, __pyx_k_d, sizeof(__pyx_k_d), 0, 1, 0, 0}, - {0, __pyx_k_d_2, sizeof(__pyx_k_d_2), 0, 0, 1, 1}, - {0, __pyx_k_da, sizeof(__pyx_k_da), 0, 0, 1, 1}, - {0, __pyx_k_db, sizeof(__pyx_k_db), 0, 0, 1, 1}, - {0, __pyx_k_decode, sizeof(__pyx_k_decode), 0, 0, 1, 1}, - {0, __pyx_k_descending, sizeof(__pyx_k_descending), 0, 0, 1, 1}, - {0, __pyx_k_deterministic, sizeof(__pyx_k_deterministic), 0, 0, 1, 1}, - {0, __pyx_k_device, sizeof(__pyx_k_device), 0, 0, 1, 1}, - {0, __pyx_k_dir, sizeof(__pyx_k_dir), 0, 0, 1, 1}, - {0, __pyx_k_dirs, sizeof(__pyx_k_dirs), 0, 0, 1, 1}, - {0, __pyx_k_disable, sizeof(__pyx_k_disable), 0, 1, 0, 0}, - {0, __pyx_k_display, sizeof(__pyx_k_display), 0, 0, 1, 1}, - {0, __pyx_k_divisor, sizeof(__pyx_k_divisor), 0, 0, 1, 1}, - {0, __pyx_k_e, sizeof(__pyx_k_e), 0, 0, 1, 1}, - {0, __pyx_k_emojis, sizeof(__pyx_k_emojis), 0, 0, 1, 1}, - {0, __pyx_k_enable, sizeof(__pyx_k_enable), 0, 1, 0, 0}, - {0, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, - {0, __pyx_k_end, sizeof(__pyx_k_end), 0, 1, 0, 1}, - {0, __pyx_k_enter, sizeof(__pyx_k_enter), 0, 0, 1, 1}, - {0, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, - {0, __pyx_k_env, sizeof(__pyx_k_env), 0, 0, 1, 1}, - {0, __pyx_k_env_var, sizeof(__pyx_k_env_var), 0, 0, 1, 1}, - {0, __pyx_k_environ, sizeof(__pyx_k_environ), 0, 0, 1, 1}, - {0, __pyx_k_eps, sizeof(__pyx_k_eps), 0, 0, 1, 1}, - {0, __pyx_k_exclude, sizeof(__pyx_k_exclude), 0, 0, 1, 1}, - {0, __pyx_k_exist_ok, sizeof(__pyx_k_exist_ok), 0, 0, 1, 1}, - {0, __pyx_k_exists, sizeof(__pyx_k_exists), 0, 0, 1, 1}, - {0, __pyx_k_exit, sizeof(__pyx_k_exit), 0, 0, 1, 1}, - {0, __pyx_k_f, sizeof(__pyx_k_f), 0, 0, 1, 1}, - {0, __pyx_k_file, sizeof(__pyx_k_file), 0, 0, 1, 1}, - {0, __pyx_k_file_2, sizeof(__pyx_k_file_2), 0, 0, 1, 1}, - {0, __pyx_k_file_size, sizeof(__pyx_k_file_size), 0, 0, 1, 1}, - {0, __pyx_k_file_size_locals_genexpr, sizeof(__pyx_k_file_size_locals_genexpr), 0, 0, 1, 1}, - {0, __pyx_k_fitness, sizeof(__pyx_k_fitness), 0, 0, 1, 1}, - {0, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, - {0, __pyx_k_float_kind, sizeof(__pyx_k_float_kind), 0, 1, 0, 1}, - {0, __pyx_k_floor, sizeof(__pyx_k_floor), 0, 0, 1, 1}, - {0, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, - {0, __pyx_k_formatter, sizeof(__pyx_k_formatter), 0, 0, 1, 1}, - {0, __pyx_k_from_numpy, sizeof(__pyx_k_from_numpy), 0, 0, 1, 1}, - {0, __pyx_k_func, sizeof(__pyx_k_func), 0, 0, 1, 1}, - {0, __pyx_k_gain, sizeof(__pyx_k_gain), 0, 0, 1, 1}, - {0, __pyx_k_gc, sizeof(__pyx_k_gc), 0, 1, 0, 0}, - {0, __pyx_k_genexpr, sizeof(__pyx_k_genexpr), 0, 0, 1, 1}, - {0, __pyx_k_get, sizeof(__pyx_k_get), 0, 0, 1, 1}, - {0, __pyx_k_getLogger, sizeof(__pyx_k_getLogger), 0, 0, 1, 1}, - {0, __pyx_k_get_latest_run, sizeof(__pyx_k_get_latest_run), 0, 0, 1, 1}, - {0, __pyx_k_get_terminal_size, sizeof(__pyx_k_get_terminal_size), 0, 0, 1, 1}, - {0, __pyx_k_getctime, sizeof(__pyx_k_getctime), 0, 0, 1, 1}, - {0, __pyx_k_getenv, sizeof(__pyx_k_getenv), 0, 0, 1, 1}, - {0, __pyx_k_glob, sizeof(__pyx_k_glob), 0, 0, 1, 1}, - {0, __pyx_k_green, sizeof(__pyx_k_green), 0, 1, 0, 1}, - {0, __pyx_k_groups, sizeof(__pyx_k_groups), 0, 0, 1, 1}, - {0, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, - {0, __pyx_k_handler, sizeof(__pyx_k_handler), 0, 0, 1, 1}, - {0, __pyx_k_hard, sizeof(__pyx_k_hard), 0, 0, 1, 1}, - {0, __pyx_k_home, sizeof(__pyx_k_home), 0, 0, 1, 1}, - {0, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, - {0, __pyx_k_ignore, sizeof(__pyx_k_ignore), 0, 1, 0, 1}, - {0, __pyx_k_image_weights, 
sizeof(__pyx_k_image_weights), 0, 0, 1, 1}, - {0, __pyx_k_img0_shape, sizeof(__pyx_k_img0_shape), 0, 0, 1, 1}, - {0, __pyx_k_img1_shape, sizeof(__pyx_k_img1_shape), 0, 0, 1, 1}, - {0, __pyx_k_imgsz, sizeof(__pyx_k_imgsz), 0, 0, 1, 1}, - {0, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {0, __pyx_k_increment_path, sizeof(__pyx_k_increment_path), 0, 0, 1, 1}, - {0, __pyx_k_info, sizeof(__pyx_k_info), 0, 0, 1, 1}, - {0, __pyx_k_init_seeds, sizeof(__pyx_k_init_seeds), 0, 0, 1, 1}, - {0, __pyx_k_initializing, sizeof(__pyx_k_initializing), 0, 0, 1, 1}, - {0, __pyx_k_input, sizeof(__pyx_k_input), 0, 0, 1, 1}, - {0, __pyx_k_instance, sizeof(__pyx_k_instance), 0, 0, 1, 1}, - {0, __pyx_k_int, sizeof(__pyx_k_int), 0, 0, 1, 1}, - {0, __pyx_k_intersect_dicts, sizeof(__pyx_k_intersect_dicts), 0, 0, 1, 1}, - {0, __pyx_k_intersect_dicts_locals_genexpr, sizeof(__pyx_k_intersect_dicts_locals_genexpr), 0, 0, 1, 1}, - {0, __pyx_k_iou, sizeof(__pyx_k_iou), 0, 0, 1, 1}, - {0, __pyx_k_iou_thres, sizeof(__pyx_k_iou_thres), 0, 0, 1, 1}, - {0, __pyx_k_is_ascii, sizeof(__pyx_k_is_ascii), 0, 0, 1, 1}, - {0, __pyx_k_is_chinese, sizeof(__pyx_k_is_chinese), 0, 0, 1, 1}, - {0, __pyx_k_is_coroutine, sizeof(__pyx_k_is_coroutine), 0, 0, 1, 1}, - {0, __pyx_k_is_dir, sizeof(__pyx_k_is_dir), 0, 0, 1, 1}, - {0, __pyx_k_is_file, sizeof(__pyx_k_is_file), 0, 0, 1, 1}, - {0, __pyx_k_is_writeable, sizeof(__pyx_k_is_writeable), 0, 0, 1, 1}, - {0, __pyx_k_isenabled, sizeof(__pyx_k_isenabled), 0, 1, 0, 0}, - {0, __pyx_k_items, sizeof(__pyx_k_items), 0, 0, 1, 1}, - {0, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, - {0, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, - {0, __pyx_k_keepdim, sizeof(__pyx_k_keepdim), 0, 0, 1, 1}, - {0, __pyx_k_key, sizeof(__pyx_k_key), 0, 0, 1, 1}, - {0, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1}, - {0, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, - {0, __pyx_k_labels, sizeof(__pyx_k_labels), 0, 0, 1, 1}, - {0, __pyx_k_labels_to_class_weights, sizeof(__pyx_k_labels_to_class_weights), 0, 0, 1, 1}, - {0, __pyx_k_labels_to_image_weights, sizeof(__pyx_k_labels_to_image_weights), 0, 0, 1, 1}, - {0, __pyx_k_last_list, sizeof(__pyx_k_last_list), 0, 0, 1, 1}, - {0, __pyx_k_last_pt, sizeof(__pyx_k_last_pt), 0, 1, 0, 0}, - {0, __pyx_k_level, sizeof(__pyx_k_level), 0, 0, 1, 1}, - {0, __pyx_k_linewidth, sizeof(__pyx_k_linewidth), 0, 0, 1, 1}, - {0, __pyx_k_logging, sizeof(__pyx_k_logging), 0, 0, 1, 1}, - {0, __pyx_k_long, sizeof(__pyx_k_long), 0, 0, 1, 1}, - {0, __pyx_k_long, sizeof(__pyx_k_long), 0, 1, 0, 1}, - {0, __pyx_k_lower, sizeof(__pyx_k_lower), 0, 0, 1, 1}, - {0, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1}, - {0, __pyx_k_magenta, sizeof(__pyx_k_magenta), 0, 1, 0, 1}, - {0, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {0, __pyx_k_make_divisible, sizeof(__pyx_k_make_divisible), 0, 0, 1, 1}, - {0, __pyx_k_manual_seed, sizeof(__pyx_k_manual_seed), 0, 0, 1, 1}, - {0, __pyx_k_matches, sizeof(__pyx_k_matches), 0, 0, 1, 1}, - {0, __pyx_k_math, sizeof(__pyx_k_math), 0, 0, 1, 1}, - {0, __pyx_k_max, sizeof(__pyx_k_max), 0, 0, 1, 1}, - {0, __pyx_k_max_columns, sizeof(__pyx_k_max_columns), 0, 0, 1, 1}, - {0, __pyx_k_max_det, sizeof(__pyx_k_max_det), 0, 0, 1, 1}, - {0, __pyx_k_max_nms, sizeof(__pyx_k_max_nms), 0, 0, 1, 1}, - {0, __pyx_k_max_wh, sizeof(__pyx_k_max_wh), 0, 0, 1, 1}, - {0, __pyx_k_merge, sizeof(__pyx_k_merge), 0, 0, 1, 1}, - {0, __pyx_k_message_s, sizeof(__pyx_k_message_s), 0, 1, 0, 0}, - {0, __pyx_k_methods, sizeof(__pyx_k_methods), 0, 0, 1, 1}, - {0, __pyx_k_min_wh, sizeof(__pyx_k_min_wh), 0, 0, 
1, 1}, - {0, __pyx_k_minimum, sizeof(__pyx_k_minimum), 0, 0, 1, 1}, - {0, __pyx_k_minlength, sizeof(__pyx_k_minlength), 0, 0, 1, 1}, - {0, __pyx_k_mkdir, sizeof(__pyx_k_mkdir), 0, 0, 1, 1}, - {0, __pyx_k_mm, sizeof(__pyx_k_mm), 0, 0, 1, 1}, - {0, __pyx_k_multi_label, sizeof(__pyx_k_multi_label), 0, 0, 1, 1}, - {0, __pyx_k_must_be_multiple_of_max_stride, sizeof(__pyx_k_must_be_multiple_of_max_stride), 0, 1, 0, 0}, - {0, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1}, - {0, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {0, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, - {0, __pyx_k_nc, sizeof(__pyx_k_nc), 0, 0, 1, 1}, - {0, __pyx_k_new_size, sizeof(__pyx_k_new_size), 0, 0, 1, 1}, - {0, __pyx_k_nms, sizeof(__pyx_k_nms), 0, 0, 1, 1}, - {0, __pyx_k_non_max_suppression, sizeof(__pyx_k_non_max_suppression), 0, 0, 1, 1}, - {0, __pyx_k_nonzero, sizeof(__pyx_k_nonzero), 0, 0, 1, 1}, - {0, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, - {0, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, - {0, __pyx_k_one_cycle, sizeof(__pyx_k_one_cycle), 0, 0, 1, 1}, - {0, __pyx_k_one_cycle_locals_lambda, sizeof(__pyx_k_one_cycle_locals_lambda), 0, 0, 1, 1}, - {0, __pyx_k_ones, sizeof(__pyx_k_ones), 0, 0, 1, 1}, - {0, __pyx_k_open, sizeof(__pyx_k_open), 0, 0, 1, 1}, - {0, __pyx_k_ops, sizeof(__pyx_k_ops), 0, 0, 1, 1}, - {0, __pyx_k_opt, sizeof(__pyx_k_opt), 0, 0, 1, 1}, - {0, __pyx_k_options, sizeof(__pyx_k_options), 0, 0, 1, 1}, - {0, __pyx_k_os, sizeof(__pyx_k_os), 0, 0, 1, 1}, - {0, __pyx_k_output, sizeof(__pyx_k_output), 0, 0, 1, 1}, - {0, __pyx_k_pad, sizeof(__pyx_k_pad), 0, 0, 1, 1}, - {0, __pyx_k_padh, sizeof(__pyx_k_padh), 0, 0, 1, 1}, - {0, __pyx_k_padw, sizeof(__pyx_k_padw), 0, 0, 1, 1}, - {0, __pyx_k_pandas, sizeof(__pyx_k_pandas), 0, 0, 1, 1}, - {0, __pyx_k_parents, sizeof(__pyx_k_parents), 0, 0, 1, 1}, - {0, __pyx_k_parse, sizeof(__pyx_k_parse), 0, 0, 1, 1}, - {0, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, - {0, __pyx_k_pathlib, sizeof(__pyx_k_pathlib), 0, 0, 1, 1}, - {0, __pyx_k_pattern, sizeof(__pyx_k_pattern), 0, 0, 1, 1}, - {0, __pyx_k_pd, sizeof(__pyx_k_pd), 0, 0, 1, 1}, - {0, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils), 0, 0, 1, 0}, - {0, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2), 0, 0, 1, 1}, - {0, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3), 0, 0, 1, 1}, - {0, __pyx_k_pi, sizeof(__pyx_k_pi), 0, 0, 1, 1}, - {0, __pyx_k_pinned, sizeof(__pyx_k_pinned), 0, 0, 1, 1}, - {0, __pyx_k_platform, sizeof(__pyx_k_platform), 0, 0, 1, 1}, - {0, __pyx_k_precision, sizeof(__pyx_k_precision), 0, 0, 1, 1}, - {0, __pyx_k_prediction, sizeof(__pyx_k_prediction), 0, 0, 1, 1}, - {0, __pyx_k_print, sizeof(__pyx_k_print), 0, 0, 1, 1}, - {0, __pyx_k_print_args, sizeof(__pyx_k_print_args), 0, 0, 1, 1}, - {0, __pyx_k_print_args_locals_genexpr, sizeof(__pyx_k_print_args_locals_genexpr), 0, 0, 1, 1}, - {0, __pyx_k_profile, sizeof(__pyx_k_profile), 0, 0, 1, 1}, - {0, __pyx_k_python_version, sizeof(__pyx_k_python_version), 0, 0, 1, 1}, - {0, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1}, - {0, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {0, __pyx_k_rank, sizeof(__pyx_k_rank), 0, 0, 1, 1}, - {0, __pyx_k_ratio_pad, sizeof(__pyx_k_ratio_pad), 0, 0, 1, 1}, - {0, __pyx_k_re, sizeof(__pyx_k_re), 0, 0, 1, 1}, - {0, __pyx_k_recursive, sizeof(__pyx_k_recursive), 0, 0, 1, 1}, - {0, __pyx_k_red, sizeof(__pyx_k_red), 0, 1, 0, 1}, - {0, __pyx_k_redundant, 
sizeof(__pyx_k_redundant), 0, 0, 1, 1}, - {0, __pyx_k_repl, sizeof(__pyx_k_repl), 0, 0, 1, 1}, - {0, __pyx_k_replace, sizeof(__pyx_k_replace), 0, 0, 1, 1}, - {0, __pyx_k_reshape, sizeof(__pyx_k_reshape), 0, 0, 1, 1}, - {0, __pyx_k_resolve, sizeof(__pyx_k_resolve), 0, 0, 1, 1}, - {0, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, - {0, __pyx_k_s_2, sizeof(__pyx_k_s_2), 0, 1, 0, 0}, - {0, __pyx_k_s_exceeded, sizeof(__pyx_k_s_exceeded), 0, 1, 0, 0}, - {0, __pyx_k_scale_coords, sizeof(__pyx_k_scale_coords), 0, 0, 1, 1}, - {0, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1}, - {0, __pyx_k_search, sizeof(__pyx_k_search), 0, 0, 1, 1}, - {0, __pyx_k_search_dir, sizeof(__pyx_k_search_dir), 0, 0, 1, 1}, - {0, __pyx_k_seed, sizeof(__pyx_k_seed), 0, 0, 1, 1}, - {0, __pyx_k_send, sizeof(__pyx_k_send), 0, 0, 1, 1}, - {0, __pyx_k_sep, sizeof(__pyx_k_sep), 0, 0, 1, 1}, - {0, __pyx_k_setNumThreads, sizeof(__pyx_k_setNumThreads), 0, 0, 1, 1}, - {0, __pyx_k_set_logging, sizeof(__pyx_k_set_logging), 0, 0, 1, 1}, - {0, __pyx_k_set_printoptions, sizeof(__pyx_k_set_printoptions), 0, 0, 1, 1}, - {0, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {0, __pyx_k_shutil, sizeof(__pyx_k_shutil), 0, 0, 1, 1}, - {0, __pyx_k_spec, sizeof(__pyx_k_spec), 0, 0, 1, 1}, - {0, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1}, - {0, __pyx_k_st_size, sizeof(__pyx_k_st_size), 0, 0, 1, 1}, - {0, __pyx_k_startswith, sizeof(__pyx_k_startswith), 0, 0, 1, 1}, - {0, __pyx_k_stat, sizeof(__pyx_k_stat), 0, 0, 1, 1}, - {0, __pyx_k_stem, sizeof(__pyx_k_stem), 0, 0, 1, 1}, - {0, __pyx_k_steps, sizeof(__pyx_k_steps), 0, 0, 1, 1}, - {0, __pyx_k_str, sizeof(__pyx_k_str), 0, 0, 1, 1}, - {0, __pyx_k_string, sizeof(__pyx_k_string), 0, 0, 1, 1}, - {0, __pyx_k_sub, sizeof(__pyx_k_sub), 0, 0, 1, 1}, - {0, __pyx_k_suffix, sizeof(__pyx_k_suffix), 0, 0, 1, 1}, - {0, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1}, - {0, __pyx_k_system, sizeof(__pyx_k_system), 0, 0, 1, 1}, - {0, __pyx_k_t, sizeof(__pyx_k_t), 0, 0, 1, 1}, - {0, __pyx_k_tensor, sizeof(__pyx_k_tensor), 0, 0, 1, 1}, - {0, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {0, __pyx_k_test_2, sizeof(__pyx_k_test_2), 0, 0, 1, 1}, - {0, __pyx_k_throw, sizeof(__pyx_k_throw), 0, 0, 1, 1}, - {0, __pyx_k_time, sizeof(__pyx_k_time), 0, 0, 1, 1}, - {0, __pyx_k_time_limit, sizeof(__pyx_k_time_limit), 0, 0, 1, 1}, - {0, __pyx_k_tmp, sizeof(__pyx_k_tmp), 0, 1, 0, 0}, - {0, __pyx_k_tmp_txt, sizeof(__pyx_k_tmp_txt), 0, 1, 0, 0}, - {0, __pyx_k_torch, sizeof(__pyx_k_torch), 0, 0, 1, 1}, - {0, __pyx_k_torch_backends_cudnn, sizeof(__pyx_k_torch_backends_cudnn), 0, 0, 1, 1}, - {0, __pyx_k_torchvision, sizeof(__pyx_k_torchvision), 0, 0, 1, 1}, - {0, __pyx_k_true, sizeof(__pyx_k_true), 0, 1, 0, 1}, - {0, __pyx_k_try_except, sizeof(__pyx_k_try_except), 0, 0, 1, 1}, - {0, __pyx_k_try_except_locals_handler, sizeof(__pyx_k_try_except_locals_handler), 0, 0, 1, 1}, - {0, __pyx_k_underline, sizeof(__pyx_k_underline), 0, 1, 0, 1}, - {0, __pyx_k_unlink, sizeof(__pyx_k_unlink), 0, 0, 1, 1}, - {0, __pyx_k_unquote, sizeof(__pyx_k_unquote), 0, 0, 1, 1}, - {0, __pyx_k_updating_to, sizeof(__pyx_k_updating_to), 0, 1, 0, 0}, - {0, __pyx_k_url, sizeof(__pyx_k_url), 0, 0, 1, 1}, - {0, __pyx_k_url2file, sizeof(__pyx_k_url2file), 0, 0, 1, 1}, - {0, __pyx_k_urllib, sizeof(__pyx_k_urllib), 0, 0, 1, 1}, - {0, __pyx_k_user_config_dir, sizeof(__pyx_k_user_config_dir), 0, 0, 1, 1}, - {0, __pyx_k_v, sizeof(__pyx_k_v), 0, 0, 1, 1}, - {0, __pyx_k_valid_values_are_between_0_0_an, sizeof(__pyx_k_valid_values_are_between_0_0_an), 0, 1, 0, 
0}, - {0, __pyx_k_vars, sizeof(__pyx_k_vars), 0, 0, 1, 1}, - {0, __pyx_k_verbose, sizeof(__pyx_k_verbose), 0, 0, 1, 1}, - {0, __pyx_k_version, sizeof(__pyx_k_version), 0, 1, 0, 0}, - {0, __pyx_k_view, sizeof(__pyx_k_view), 0, 0, 1, 1}, - {0, __pyx_k_w, sizeof(__pyx_k_w), 0, 0, 1, 1}, - {0, __pyx_k_w, sizeof(__pyx_k_w), 0, 1, 0, 1}, - {0, __pyx_k_warning, sizeof(__pyx_k_warning), 0, 0, 1, 1}, - {0, __pyx_k_weights, sizeof(__pyx_k_weights), 0, 0, 1, 1}, - {0, __pyx_k_white, sizeof(__pyx_k_white), 0, 1, 0, 1}, - {0, __pyx_k_with_suffix, sizeof(__pyx_k_with_suffix), 0, 0, 1, 1}, - {0, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, - {0, __pyx_k_xc, sizeof(__pyx_k_xc), 0, 0, 1, 1}, - {0, __pyx_k_xi, sizeof(__pyx_k_xi), 0, 0, 1, 1}, - {0, __pyx_k_xyn2xy, sizeof(__pyx_k_xyn2xy), 0, 0, 1, 1}, - {0, __pyx_k_xywh2xyxy, sizeof(__pyx_k_xywh2xyxy), 0, 0, 1, 1}, - {0, __pyx_k_xywhn2xyxy, sizeof(__pyx_k_xywhn2xyxy), 0, 0, 1, 1}, - {0, __pyx_k_xyxy2xywh, sizeof(__pyx_k_xyxy2xywh), 0, 0, 1, 1}, - {0, __pyx_k_xyxy2xywhn, sizeof(__pyx_k_xyxy2xywhn), 0, 0, 1, 1}, - {0, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, - {0, __pyx_k_y1, sizeof(__pyx_k_y1), 0, 0, 1, 1}, - {0, __pyx_k_y2, sizeof(__pyx_k_y2), 0, 0, 1, 1}, - {0, __pyx_k_yaml, sizeof(__pyx_k_yaml), 0, 0, 1, 1}, - {0, __pyx_k_yellow, sizeof(__pyx_k_yellow), 0, 1, 0, 1}, - {0, __pyx_k_yolov5, sizeof(__pyx_k_yolov5), 0, 1, 0, 1}, - {0, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, - #else - {&__pyx_kp_u_0_0_0, __pyx_k_0_0_0, sizeof(__pyx_k_0_0_0), 0, 1, 0, 0}, - {&__pyx_kp_u_0m, __pyx_k_0m, sizeof(__pyx_k_0m), 0, 1, 0, 0}, - {&__pyx_kp_u_11_5g, __pyx_k_11_5g, sizeof(__pyx_k_11_5g), 0, 1, 0, 0}, - {&__pyx_kp_u_1m, __pyx_k_1m, sizeof(__pyx_k_1m), 0, 1, 0, 0}, - {&__pyx_kp_u_30m, __pyx_k_30m, sizeof(__pyx_k_30m), 0, 1, 0, 0}, - {&__pyx_kp_u_31m, __pyx_k_31m, sizeof(__pyx_k_31m), 0, 1, 0, 0}, - {&__pyx_kp_u_32m, __pyx_k_32m, sizeof(__pyx_k_32m), 0, 1, 0, 0}, - {&__pyx_kp_u_33m, __pyx_k_33m, sizeof(__pyx_k_33m), 0, 1, 0, 0}, - {&__pyx_kp_u_34m, __pyx_k_34m, sizeof(__pyx_k_34m), 0, 1, 0, 0}, - {&__pyx_kp_u_35m, __pyx_k_35m, sizeof(__pyx_k_35m), 0, 1, 0, 0}, - {&__pyx_kp_u_36m, __pyx_k_36m, sizeof(__pyx_k_36m), 0, 1, 0, 0}, - {&__pyx_kp_u_37m, __pyx_k_37m, sizeof(__pyx_k_37m), 0, 1, 0, 0}, - {&__pyx_kp_u_3_6_2, __pyx_k_3_6_2, sizeof(__pyx_k_3_6_2), 0, 1, 0, 0}, - {&__pyx_kp_u_4m, __pyx_k_4m, sizeof(__pyx_k_4m), 0, 1, 0, 0}, - {&__pyx_kp_u_90m, __pyx_k_90m, sizeof(__pyx_k_90m), 0, 1, 0, 0}, - {&__pyx_kp_u_91m, __pyx_k_91m, sizeof(__pyx_k_91m), 0, 1, 0, 0}, - {&__pyx_kp_u_92m, __pyx_k_92m, sizeof(__pyx_k_92m), 0, 1, 0, 0}, - {&__pyx_kp_u_93m, __pyx_k_93m, sizeof(__pyx_k_93m), 0, 1, 0, 0}, - {&__pyx_kp_u_94m, __pyx_k_94m, sizeof(__pyx_k_94m), 0, 1, 0, 0}, - {&__pyx_kp_u_95m, __pyx_k_95m, sizeof(__pyx_k_95m), 0, 1, 0, 0}, - {&__pyx_kp_u_96m, __pyx_k_96m, sizeof(__pyx_k_96m), 0, 1, 0, 0}, - {&__pyx_kp_u_97m, __pyx_k_97m, sizeof(__pyx_k_97m), 0, 1, 0, 0}, - {&__pyx_kp_u_AppData_Roaming, __pyx_k_AppData_Roaming, sizeof(__pyx_k_AppData_Roaming), 0, 1, 0, 0}, - {&__pyx_n_s_AssertionError, __pyx_k_AssertionError, sizeof(__pyx_k_AssertionError), 0, 0, 1, 1}, - {&__pyx_n_u_Darwin, __pyx_k_Darwin, sizeof(__pyx_k_Darwin), 0, 1, 0, 1}, - {&__pyx_n_s_FILE, __pyx_k_FILE, sizeof(__pyx_k_FILE), 0, 0, 1, 1}, - {&__pyx_n_s_INFO, __pyx_k_INFO, sizeof(__pyx_k_INFO), 0, 0, 1, 1}, - {&__pyx_kp_u_Invalid_Confidence_threshold, __pyx_k_Invalid_Confidence_threshold, sizeof(__pyx_k_Invalid_Confidence_threshold), 0, 1, 0, 0}, - {&__pyx_kp_u_Invalid_IoU, __pyx_k_Invalid_IoU, 
sizeof(__pyx_k_Invalid_IoU), 0, 1, 0, 0}, - {&__pyx_n_s_LOGGER, __pyx_k_LOGGER, sizeof(__pyx_k_LOGGER), 0, 0, 1, 1}, - {&__pyx_kp_u_Library_Application_Support, __pyx_k_Library_Application_Support, sizeof(__pyx_k_Library_Application_Support), 0, 1, 0, 0}, - {&__pyx_n_u_Linux, __pyx_k_Linux, sizeof(__pyx_k_Linux), 0, 1, 0, 1}, - {&__pyx_n_s_NCOLS, __pyx_k_NCOLS, sizeof(__pyx_k_NCOLS), 0, 0, 1, 1}, - {&__pyx_n_u_NUMEXPR_MAX_THREADS, __pyx_k_NUMEXPR_MAX_THREADS, sizeof(__pyx_k_NUMEXPR_MAX_THREADS), 0, 1, 0, 1}, - {&__pyx_n_s_NUM_THREADS, __pyx_k_NUM_THREADS, sizeof(__pyx_k_NUM_THREADS), 0, 0, 1, 1}, - {&__pyx_n_s_OSError, __pyx_k_OSError, sizeof(__pyx_k_OSError), 0, 0, 1, 1}, - {&__pyx_n_s_Path, __pyx_k_Path, sizeof(__pyx_k_Path), 0, 0, 1, 1}, - {&__pyx_kp_u_Python, __pyx_k_Python, sizeof(__pyx_k_Python), 0, 1, 0, 0}, - {&__pyx_n_u_RANK, __pyx_k_RANK, sizeof(__pyx_k_RANK), 0, 1, 0, 1}, - {&__pyx_n_s_ROOT, __pyx_k_ROOT, sizeof(__pyx_k_ROOT), 0, 0, 1, 1}, - {&__pyx_n_s_R_OK, __pyx_k_R_OK, sizeof(__pyx_k_R_OK), 0, 0, 1, 1}, - {&__pyx_n_s_T, __pyx_k_T, sizeof(__pyx_k_T), 0, 0, 1, 1}, - {&__pyx_n_s_Tensor, __pyx_k_Tensor, sizeof(__pyx_k_Tensor), 0, 0, 1, 1}, - {&__pyx_n_u_Ultralytics, __pyx_k_Ultralytics, sizeof(__pyx_k_Ultralytics), 0, 1, 0, 1}, - {&__pyx_n_s_VERBOSE, __pyx_k_VERBOSE, sizeof(__pyx_k_VERBOSE), 0, 0, 1, 1}, - {&__pyx_n_s_WARNING, __pyx_k_WARNING, sizeof(__pyx_k_WARNING), 0, 0, 1, 1}, - {&__pyx_kp_u_WARNING_NMS_time_limit, __pyx_k_WARNING_NMS_time_limit, sizeof(__pyx_k_WARNING_NMS_time_limit), 0, 1, 0, 0}, - {&__pyx_kp_u_WARNING_img_size, __pyx_k_WARNING_img_size, sizeof(__pyx_k_WARNING_img_size), 0, 1, 0, 0}, - {&__pyx_n_u_Windows, __pyx_k_Windows, sizeof(__pyx_k_Windows), 0, 1, 0, 1}, - {&__pyx_n_u_YOLOV5_CONFIG_DIR, __pyx_k_YOLOV5_CONFIG_DIR, sizeof(__pyx_k_YOLOV5_CONFIG_DIR), 0, 1, 0, 1}, - {&__pyx_n_u_YOLOv5_VERBOSE, __pyx_k_YOLOv5_VERBOSE, sizeof(__pyx_k_YOLOv5_VERBOSE), 0, 1, 0, 1}, - {&__pyx_kp_u__10, __pyx_k__10, sizeof(__pyx_k__10), 0, 1, 0, 0}, - {&__pyx_kp_u__11, __pyx_k__11, sizeof(__pyx_k__11), 0, 1, 0, 0}, - {&__pyx_n_u__14, __pyx_k__14, sizeof(__pyx_k__14), 0, 1, 0, 1}, - {&__pyx_kp_u__15, __pyx_k__15, sizeof(__pyx_k__15), 0, 1, 0, 0}, - {&__pyx_kp_u__16, __pyx_k__16, sizeof(__pyx_k__16), 0, 1, 0, 0}, - {&__pyx_kp_u__17, __pyx_k__17, sizeof(__pyx_k__17), 0, 1, 0, 0}, - {&__pyx_kp_u__18, __pyx_k__18, sizeof(__pyx_k__18), 0, 1, 0, 0}, - {&__pyx_n_s__20, __pyx_k__20, sizeof(__pyx_k__20), 0, 0, 1, 1}, - {&__pyx_kp_u__20, __pyx_k__20, sizeof(__pyx_k__20), 0, 1, 0, 0}, - {&__pyx_kp_u__21, __pyx_k__21, sizeof(__pyx_k__21), 0, 1, 0, 0}, - {&__pyx_n_u__22, __pyx_k__22, sizeof(__pyx_k__22), 0, 1, 0, 1}, - {&__pyx_n_u__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 1, 0, 1}, - {&__pyx_kp_u__5, __pyx_k__5, sizeof(__pyx_k__5), 0, 1, 0, 0}, - {&__pyx_kp_u__6, __pyx_k__6, sizeof(__pyx_k__6), 0, 1, 0, 0}, - {&__pyx_kp_u__7, __pyx_k__7, sizeof(__pyx_k__7), 0, 1, 0, 0}, - {&__pyx_n_s__9, __pyx_k__9, sizeof(__pyx_k__9), 0, 0, 1, 1}, - {&__pyx_kp_u__9, __pyx_k__9, sizeof(__pyx_k__9), 0, 1, 0, 0}, - {&__pyx_n_s_access, __pyx_k_access, sizeof(__pyx_k_access), 0, 0, 1, 1}, - {&__pyx_n_s_agnostic, __pyx_k_agnostic, sizeof(__pyx_k_agnostic), 0, 0, 1, 1}, - {&__pyx_n_s_any, __pyx_k_any, sizeof(__pyx_k_any), 0, 0, 1, 1}, - {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1}, - {&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, - {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, - {&__pyx_n_s_as_tuple, __pyx_k_as_tuple, sizeof(__pyx_k_as_tuple), 
0, 0, 1, 1}, - {&__pyx_n_u_ascii, __pyx_k_ascii, sizeof(__pyx_k_ascii), 0, 1, 0, 1}, - {&__pyx_n_s_astype, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1}, - {&__pyx_n_s_asyncio_coroutines, __pyx_k_asyncio_coroutines, sizeof(__pyx_k_asyncio_coroutines), 0, 0, 1, 1}, - {&__pyx_n_s_backends, __pyx_k_backends, sizeof(__pyx_k_backends), 0, 0, 1, 1}, - {&__pyx_n_s_basicConfig, __pyx_k_basicConfig, sizeof(__pyx_k_basicConfig), 0, 0, 1, 1}, - {&__pyx_n_s_benchmark, __pyx_k_benchmark, sizeof(__pyx_k_benchmark), 0, 0, 1, 1}, - {&__pyx_n_s_bincount, __pyx_k_bincount, sizeof(__pyx_k_bincount), 0, 0, 1, 1}, - {&__pyx_n_u_black, __pyx_k_black, sizeof(__pyx_k_black), 0, 1, 0, 1}, - {&__pyx_n_u_blue, __pyx_k_blue, sizeof(__pyx_k_blue), 0, 1, 0, 1}, - {&__pyx_n_u_bold, __pyx_k_bold, sizeof(__pyx_k_bold), 0, 1, 0, 1}, - {&__pyx_n_s_box, __pyx_k_box, sizeof(__pyx_k_box), 0, 0, 1, 1}, - {&__pyx_n_s_box_iou, __pyx_k_box_iou, sizeof(__pyx_k_box_iou), 0, 0, 1, 1}, - {&__pyx_n_s_boxes, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1}, - {&__pyx_n_u_bright_black, __pyx_k_bright_black, sizeof(__pyx_k_bright_black), 0, 1, 0, 1}, - {&__pyx_n_u_bright_blue, __pyx_k_bright_blue, sizeof(__pyx_k_bright_blue), 0, 1, 0, 1}, - {&__pyx_n_u_bright_cyan, __pyx_k_bright_cyan, sizeof(__pyx_k_bright_cyan), 0, 1, 0, 1}, - {&__pyx_n_u_bright_green, __pyx_k_bright_green, sizeof(__pyx_k_bright_green), 0, 1, 0, 1}, - {&__pyx_n_u_bright_magenta, __pyx_k_bright_magenta, sizeof(__pyx_k_bright_magenta), 0, 1, 0, 1}, - {&__pyx_n_u_bright_red, __pyx_k_bright_red, sizeof(__pyx_k_bright_red), 0, 1, 0, 1}, - {&__pyx_n_u_bright_white, __pyx_k_bright_white, sizeof(__pyx_k_bright_white), 0, 1, 0, 1}, - {&__pyx_n_u_bright_yellow, __pyx_k_bright_yellow, sizeof(__pyx_k_bright_yellow), 0, 1, 0, 1}, - {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {&__pyx_n_s_cat, __pyx_k_cat, sizeof(__pyx_k_cat), 0, 0, 1, 1}, - {&__pyx_n_s_ceil, __pyx_k_ceil, sizeof(__pyx_k_ceil), 0, 0, 1, 1}, - {&__pyx_n_s_cfg, __pyx_k_cfg, sizeof(__pyx_k_cfg), 0, 0, 1, 1}, - {&__pyx_n_s_check_img_size, __pyx_k_check_img_size, sizeof(__pyx_k_check_img_size), 0, 0, 1, 1}, - {&__pyx_n_s_check_python, __pyx_k_check_python, sizeof(__pyx_k_check_python), 0, 0, 1, 1}, - {&__pyx_n_s_check_version, __pyx_k_check_version, sizeof(__pyx_k_check_version), 0, 0, 1, 1}, - {&__pyx_n_s_clamp, __pyx_k_clamp, sizeof(__pyx_k_clamp), 0, 0, 1, 1}, - {&__pyx_n_s_class_counts, __pyx_k_class_counts, sizeof(__pyx_k_class_counts), 0, 0, 1, 1}, - {&__pyx_n_s_class_getitem, __pyx_k_class_getitem, sizeof(__pyx_k_class_getitem), 0, 0, 1, 1}, - {&__pyx_n_s_class_weights, __pyx_k_class_weights, sizeof(__pyx_k_class_weights), 0, 0, 1, 1}, - {&__pyx_n_s_classes, __pyx_k_classes, sizeof(__pyx_k_classes), 0, 0, 1, 1}, - {&__pyx_n_s_clean_str, __pyx_k_clean_str, sizeof(__pyx_k_clean_str), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_n_s_clip, __pyx_k_clip, sizeof(__pyx_k_clip), 0, 0, 1, 1}, - {&__pyx_n_s_clip_coords, __pyx_k_clip_coords, sizeof(__pyx_k_clip_coords), 0, 0, 1, 1}, - {&__pyx_n_s_clone, __pyx_k_clone, sizeof(__pyx_k_clone), 0, 0, 1, 1}, - {&__pyx_n_s_close, __pyx_k_close, sizeof(__pyx_k_close), 0, 0, 1, 1}, - {&__pyx_n_s_colors, __pyx_k_colors, sizeof(__pyx_k_colors), 0, 0, 1, 1}, - {&__pyx_n_s_colorstr, __pyx_k_colorstr, sizeof(__pyx_k_colorstr), 0, 0, 1, 1}, - {&__pyx_n_s_colorstr_locals_genexpr, __pyx_k_colorstr_locals_genexpr, sizeof(__pyx_k_colorstr_locals_genexpr), 0, 0, 1, 1}, - 
{&__pyx_n_s_columns, __pyx_k_columns, sizeof(__pyx_k_columns), 0, 0, 1, 1}, - {&__pyx_n_s_concatenate, __pyx_k_concatenate, sizeof(__pyx_k_concatenate), 0, 0, 1, 1}, - {&__pyx_n_s_conf, __pyx_k_conf, sizeof(__pyx_k_conf), 0, 0, 1, 1}, - {&__pyx_n_s_conf_thres, __pyx_k_conf_thres, sizeof(__pyx_k_conf_thres), 0, 0, 1, 1}, - {&__pyx_kp_u_config, __pyx_k_config, sizeof(__pyx_k_config), 0, 1, 0, 0}, - {&__pyx_n_s_coords, __pyx_k_coords, sizeof(__pyx_k_coords), 0, 0, 1, 1}, - {&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1}, - {&__pyx_n_s_cos, __pyx_k_cos, sizeof(__pyx_k_cos), 0, 0, 1, 1}, - {&__pyx_n_s_cpu_count, __pyx_k_cpu_count, sizeof(__pyx_k_cpu_count), 0, 0, 1, 1}, - {&__pyx_n_s_cudnn, __pyx_k_cudnn, sizeof(__pyx_k_cudnn), 0, 0, 1, 1}, - {&__pyx_n_s_current, __pyx_k_current, sizeof(__pyx_k_current), 0, 0, 1, 1}, - {&__pyx_n_s_cv2, __pyx_k_cv2, sizeof(__pyx_k_cv2), 0, 0, 1, 1}, - {&__pyx_n_u_cyan, __pyx_k_cyan, sizeof(__pyx_k_cyan), 0, 1, 0, 1}, - {&__pyx_kp_u_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 1, 0, 0}, - {&__pyx_n_s_d_2, __pyx_k_d_2, sizeof(__pyx_k_d_2), 0, 0, 1, 1}, - {&__pyx_n_s_da, __pyx_k_da, sizeof(__pyx_k_da), 0, 0, 1, 1}, - {&__pyx_n_s_db, __pyx_k_db, sizeof(__pyx_k_db), 0, 0, 1, 1}, - {&__pyx_n_s_decode, __pyx_k_decode, sizeof(__pyx_k_decode), 0, 0, 1, 1}, - {&__pyx_n_s_descending, __pyx_k_descending, sizeof(__pyx_k_descending), 0, 0, 1, 1}, - {&__pyx_n_s_deterministic, __pyx_k_deterministic, sizeof(__pyx_k_deterministic), 0, 0, 1, 1}, - {&__pyx_n_s_device, __pyx_k_device, sizeof(__pyx_k_device), 0, 0, 1, 1}, - {&__pyx_n_s_dir, __pyx_k_dir, sizeof(__pyx_k_dir), 0, 0, 1, 1}, - {&__pyx_n_s_dirs, __pyx_k_dirs, sizeof(__pyx_k_dirs), 0, 0, 1, 1}, - {&__pyx_kp_u_disable, __pyx_k_disable, sizeof(__pyx_k_disable), 0, 1, 0, 0}, - {&__pyx_n_s_display, __pyx_k_display, sizeof(__pyx_k_display), 0, 0, 1, 1}, - {&__pyx_n_s_divisor, __pyx_k_divisor, sizeof(__pyx_k_divisor), 0, 0, 1, 1}, - {&__pyx_n_s_e, __pyx_k_e, sizeof(__pyx_k_e), 0, 0, 1, 1}, - {&__pyx_n_s_emojis, __pyx_k_emojis, sizeof(__pyx_k_emojis), 0, 0, 1, 1}, - {&__pyx_kp_u_enable, __pyx_k_enable, sizeof(__pyx_k_enable), 0, 1, 0, 0}, - {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, - {&__pyx_n_u_end, __pyx_k_end, sizeof(__pyx_k_end), 0, 1, 0, 1}, - {&__pyx_n_s_enter, __pyx_k_enter, sizeof(__pyx_k_enter), 0, 0, 1, 1}, - {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, - {&__pyx_n_s_env, __pyx_k_env, sizeof(__pyx_k_env), 0, 0, 1, 1}, - {&__pyx_n_s_env_var, __pyx_k_env_var, sizeof(__pyx_k_env_var), 0, 0, 1, 1}, - {&__pyx_n_s_environ, __pyx_k_environ, sizeof(__pyx_k_environ), 0, 0, 1, 1}, - {&__pyx_n_s_eps, __pyx_k_eps, sizeof(__pyx_k_eps), 0, 0, 1, 1}, - {&__pyx_n_s_exclude, __pyx_k_exclude, sizeof(__pyx_k_exclude), 0, 0, 1, 1}, - {&__pyx_n_s_exist_ok, __pyx_k_exist_ok, sizeof(__pyx_k_exist_ok), 0, 0, 1, 1}, - {&__pyx_n_s_exists, __pyx_k_exists, sizeof(__pyx_k_exists), 0, 0, 1, 1}, - {&__pyx_n_s_exit, __pyx_k_exit, sizeof(__pyx_k_exit), 0, 0, 1, 1}, - {&__pyx_n_s_f, __pyx_k_f, sizeof(__pyx_k_f), 0, 0, 1, 1}, - {&__pyx_n_s_file, __pyx_k_file, sizeof(__pyx_k_file), 0, 0, 1, 1}, - {&__pyx_n_s_file_2, __pyx_k_file_2, sizeof(__pyx_k_file_2), 0, 0, 1, 1}, - {&__pyx_n_s_file_size, __pyx_k_file_size, sizeof(__pyx_k_file_size), 0, 0, 1, 1}, - {&__pyx_n_s_file_size_locals_genexpr, __pyx_k_file_size_locals_genexpr, sizeof(__pyx_k_file_size_locals_genexpr), 0, 0, 1, 1}, - {&__pyx_n_s_fitness, __pyx_k_fitness, sizeof(__pyx_k_fitness), 0, 0, 1, 1}, - {&__pyx_n_s_float, 
__pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, - {&__pyx_n_u_float_kind, __pyx_k_float_kind, sizeof(__pyx_k_float_kind), 0, 1, 0, 1}, - {&__pyx_n_s_floor, __pyx_k_floor, sizeof(__pyx_k_floor), 0, 0, 1, 1}, - {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, - {&__pyx_n_s_formatter, __pyx_k_formatter, sizeof(__pyx_k_formatter), 0, 0, 1, 1}, - {&__pyx_n_s_from_numpy, __pyx_k_from_numpy, sizeof(__pyx_k_from_numpy), 0, 0, 1, 1}, - {&__pyx_n_s_func, __pyx_k_func, sizeof(__pyx_k_func), 0, 0, 1, 1}, - {&__pyx_n_s_gain, __pyx_k_gain, sizeof(__pyx_k_gain), 0, 0, 1, 1}, - {&__pyx_kp_u_gc, __pyx_k_gc, sizeof(__pyx_k_gc), 0, 1, 0, 0}, - {&__pyx_n_s_genexpr, __pyx_k_genexpr, sizeof(__pyx_k_genexpr), 0, 0, 1, 1}, - {&__pyx_n_s_get, __pyx_k_get, sizeof(__pyx_k_get), 0, 0, 1, 1}, - {&__pyx_n_s_getLogger, __pyx_k_getLogger, sizeof(__pyx_k_getLogger), 0, 0, 1, 1}, - {&__pyx_n_s_get_latest_run, __pyx_k_get_latest_run, sizeof(__pyx_k_get_latest_run), 0, 0, 1, 1}, - {&__pyx_n_s_get_terminal_size, __pyx_k_get_terminal_size, sizeof(__pyx_k_get_terminal_size), 0, 0, 1, 1}, - {&__pyx_n_s_getctime, __pyx_k_getctime, sizeof(__pyx_k_getctime), 0, 0, 1, 1}, - {&__pyx_n_s_getenv, __pyx_k_getenv, sizeof(__pyx_k_getenv), 0, 0, 1, 1}, - {&__pyx_n_s_glob, __pyx_k_glob, sizeof(__pyx_k_glob), 0, 0, 1, 1}, - {&__pyx_n_u_green, __pyx_k_green, sizeof(__pyx_k_green), 0, 1, 0, 1}, - {&__pyx_n_s_groups, __pyx_k_groups, sizeof(__pyx_k_groups), 0, 0, 1, 1}, - {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, - {&__pyx_n_s_handler, __pyx_k_handler, sizeof(__pyx_k_handler), 0, 0, 1, 1}, - {&__pyx_n_s_hard, __pyx_k_hard, sizeof(__pyx_k_hard), 0, 0, 1, 1}, - {&__pyx_n_s_home, __pyx_k_home, sizeof(__pyx_k_home), 0, 0, 1, 1}, - {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, - {&__pyx_n_u_ignore, __pyx_k_ignore, sizeof(__pyx_k_ignore), 0, 1, 0, 1}, - {&__pyx_n_s_image_weights, __pyx_k_image_weights, sizeof(__pyx_k_image_weights), 0, 0, 1, 1}, - {&__pyx_n_s_img0_shape, __pyx_k_img0_shape, sizeof(__pyx_k_img0_shape), 0, 0, 1, 1}, - {&__pyx_n_s_img1_shape, __pyx_k_img1_shape, sizeof(__pyx_k_img1_shape), 0, 0, 1, 1}, - {&__pyx_n_s_imgsz, __pyx_k_imgsz, sizeof(__pyx_k_imgsz), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_increment_path, __pyx_k_increment_path, sizeof(__pyx_k_increment_path), 0, 0, 1, 1}, - {&__pyx_n_s_info, __pyx_k_info, sizeof(__pyx_k_info), 0, 0, 1, 1}, - {&__pyx_n_s_init_seeds, __pyx_k_init_seeds, sizeof(__pyx_k_init_seeds), 0, 0, 1, 1}, - {&__pyx_n_s_initializing, __pyx_k_initializing, sizeof(__pyx_k_initializing), 0, 0, 1, 1}, - {&__pyx_n_s_input, __pyx_k_input, sizeof(__pyx_k_input), 0, 0, 1, 1}, - {&__pyx_n_s_instance, __pyx_k_instance, sizeof(__pyx_k_instance), 0, 0, 1, 1}, - {&__pyx_n_s_int, __pyx_k_int, sizeof(__pyx_k_int), 0, 0, 1, 1}, - {&__pyx_n_s_intersect_dicts, __pyx_k_intersect_dicts, sizeof(__pyx_k_intersect_dicts), 0, 0, 1, 1}, - {&__pyx_n_s_intersect_dicts_locals_genexpr, __pyx_k_intersect_dicts_locals_genexpr, sizeof(__pyx_k_intersect_dicts_locals_genexpr), 0, 0, 1, 1}, - {&__pyx_n_s_iou, __pyx_k_iou, sizeof(__pyx_k_iou), 0, 0, 1, 1}, - {&__pyx_n_s_iou_thres, __pyx_k_iou_thres, sizeof(__pyx_k_iou_thres), 0, 0, 1, 1}, - {&__pyx_n_s_is_ascii, __pyx_k_is_ascii, sizeof(__pyx_k_is_ascii), 0, 0, 1, 1}, - {&__pyx_n_s_is_chinese, __pyx_k_is_chinese, sizeof(__pyx_k_is_chinese), 0, 0, 1, 1}, - {&__pyx_n_s_is_coroutine, __pyx_k_is_coroutine, sizeof(__pyx_k_is_coroutine), 0, 0, 1, 1}, - {&__pyx_n_s_is_dir, 
__pyx_k_is_dir, sizeof(__pyx_k_is_dir), 0, 0, 1, 1}, - {&__pyx_n_s_is_file, __pyx_k_is_file, sizeof(__pyx_k_is_file), 0, 0, 1, 1}, - {&__pyx_n_s_is_writeable, __pyx_k_is_writeable, sizeof(__pyx_k_is_writeable), 0, 0, 1, 1}, - {&__pyx_kp_u_isenabled, __pyx_k_isenabled, sizeof(__pyx_k_isenabled), 0, 1, 0, 0}, - {&__pyx_n_s_items, __pyx_k_items, sizeof(__pyx_k_items), 0, 0, 1, 1}, - {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, - {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, - {&__pyx_n_s_keepdim, __pyx_k_keepdim, sizeof(__pyx_k_keepdim), 0, 0, 1, 1}, - {&__pyx_n_s_key, __pyx_k_key, sizeof(__pyx_k_key), 0, 0, 1, 1}, - {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1}, - {&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, - {&__pyx_n_s_labels, __pyx_k_labels, sizeof(__pyx_k_labels), 0, 0, 1, 1}, - {&__pyx_n_s_labels_to_class_weights, __pyx_k_labels_to_class_weights, sizeof(__pyx_k_labels_to_class_weights), 0, 0, 1, 1}, - {&__pyx_n_s_labels_to_image_weights, __pyx_k_labels_to_image_weights, sizeof(__pyx_k_labels_to_image_weights), 0, 0, 1, 1}, - {&__pyx_n_s_last_list, __pyx_k_last_list, sizeof(__pyx_k_last_list), 0, 0, 1, 1}, - {&__pyx_kp_u_last_pt, __pyx_k_last_pt, sizeof(__pyx_k_last_pt), 0, 1, 0, 0}, - {&__pyx_n_s_level, __pyx_k_level, sizeof(__pyx_k_level), 0, 0, 1, 1}, - {&__pyx_n_s_linewidth, __pyx_k_linewidth, sizeof(__pyx_k_linewidth), 0, 0, 1, 1}, - {&__pyx_n_s_logging, __pyx_k_logging, sizeof(__pyx_k_logging), 0, 0, 1, 1}, - {&__pyx_n_s_long, __pyx_k_long, sizeof(__pyx_k_long), 0, 0, 1, 1}, - {&__pyx_n_u_long, __pyx_k_long, sizeof(__pyx_k_long), 0, 1, 0, 1}, - {&__pyx_n_s_lower, __pyx_k_lower, sizeof(__pyx_k_lower), 0, 0, 1, 1}, - {&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1}, - {&__pyx_n_u_magenta, __pyx_k_magenta, sizeof(__pyx_k_magenta), 0, 1, 0, 1}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_make_divisible, __pyx_k_make_divisible, sizeof(__pyx_k_make_divisible), 0, 0, 1, 1}, - {&__pyx_n_s_manual_seed, __pyx_k_manual_seed, sizeof(__pyx_k_manual_seed), 0, 0, 1, 1}, - {&__pyx_n_s_matches, __pyx_k_matches, sizeof(__pyx_k_matches), 0, 0, 1, 1}, - {&__pyx_n_s_math, __pyx_k_math, sizeof(__pyx_k_math), 0, 0, 1, 1}, - {&__pyx_n_s_max, __pyx_k_max, sizeof(__pyx_k_max), 0, 0, 1, 1}, - {&__pyx_n_s_max_columns, __pyx_k_max_columns, sizeof(__pyx_k_max_columns), 0, 0, 1, 1}, - {&__pyx_n_s_max_det, __pyx_k_max_det, sizeof(__pyx_k_max_det), 0, 0, 1, 1}, - {&__pyx_n_s_max_nms, __pyx_k_max_nms, sizeof(__pyx_k_max_nms), 0, 0, 1, 1}, - {&__pyx_n_s_max_wh, __pyx_k_max_wh, sizeof(__pyx_k_max_wh), 0, 0, 1, 1}, - {&__pyx_n_s_merge, __pyx_k_merge, sizeof(__pyx_k_merge), 0, 0, 1, 1}, - {&__pyx_kp_u_message_s, __pyx_k_message_s, sizeof(__pyx_k_message_s), 0, 1, 0, 0}, - {&__pyx_n_s_methods, __pyx_k_methods, sizeof(__pyx_k_methods), 0, 0, 1, 1}, - {&__pyx_n_s_min_wh, __pyx_k_min_wh, sizeof(__pyx_k_min_wh), 0, 0, 1, 1}, - {&__pyx_n_s_minimum, __pyx_k_minimum, sizeof(__pyx_k_minimum), 0, 0, 1, 1}, - {&__pyx_n_s_minlength, __pyx_k_minlength, sizeof(__pyx_k_minlength), 0, 0, 1, 1}, - {&__pyx_n_s_mkdir, __pyx_k_mkdir, sizeof(__pyx_k_mkdir), 0, 0, 1, 1}, - {&__pyx_n_s_mm, __pyx_k_mm, sizeof(__pyx_k_mm), 0, 0, 1, 1}, - {&__pyx_n_s_multi_label, __pyx_k_multi_label, sizeof(__pyx_k_multi_label), 0, 0, 1, 1}, - {&__pyx_kp_u_must_be_multiple_of_max_stride, __pyx_k_must_be_multiple_of_max_stride, sizeof(__pyx_k_must_be_multiple_of_max_stride), 0, 1, 0, 0}, - {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1}, - 
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, - {&__pyx_n_s_nc, __pyx_k_nc, sizeof(__pyx_k_nc), 0, 0, 1, 1}, - {&__pyx_n_s_new_size, __pyx_k_new_size, sizeof(__pyx_k_new_size), 0, 0, 1, 1}, - {&__pyx_n_s_nms, __pyx_k_nms, sizeof(__pyx_k_nms), 0, 0, 1, 1}, - {&__pyx_n_s_non_max_suppression, __pyx_k_non_max_suppression, sizeof(__pyx_k_non_max_suppression), 0, 0, 1, 1}, - {&__pyx_n_s_nonzero, __pyx_k_nonzero, sizeof(__pyx_k_nonzero), 0, 0, 1, 1}, - {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, - {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, - {&__pyx_n_s_one_cycle, __pyx_k_one_cycle, sizeof(__pyx_k_one_cycle), 0, 0, 1, 1}, - {&__pyx_n_s_one_cycle_locals_lambda, __pyx_k_one_cycle_locals_lambda, sizeof(__pyx_k_one_cycle_locals_lambda), 0, 0, 1, 1}, - {&__pyx_n_s_ones, __pyx_k_ones, sizeof(__pyx_k_ones), 0, 0, 1, 1}, - {&__pyx_n_s_open, __pyx_k_open, sizeof(__pyx_k_open), 0, 0, 1, 1}, - {&__pyx_n_s_ops, __pyx_k_ops, sizeof(__pyx_k_ops), 0, 0, 1, 1}, - {&__pyx_n_s_opt, __pyx_k_opt, sizeof(__pyx_k_opt), 0, 0, 1, 1}, - {&__pyx_n_s_options, __pyx_k_options, sizeof(__pyx_k_options), 0, 0, 1, 1}, - {&__pyx_n_s_os, __pyx_k_os, sizeof(__pyx_k_os), 0, 0, 1, 1}, - {&__pyx_n_s_output, __pyx_k_output, sizeof(__pyx_k_output), 0, 0, 1, 1}, - {&__pyx_n_s_pad, __pyx_k_pad, sizeof(__pyx_k_pad), 0, 0, 1, 1}, - {&__pyx_n_s_padh, __pyx_k_padh, sizeof(__pyx_k_padh), 0, 0, 1, 1}, - {&__pyx_n_s_padw, __pyx_k_padw, sizeof(__pyx_k_padw), 0, 0, 1, 1}, - {&__pyx_n_s_pandas, __pyx_k_pandas, sizeof(__pyx_k_pandas), 0, 0, 1, 1}, - {&__pyx_n_s_parents, __pyx_k_parents, sizeof(__pyx_k_parents), 0, 0, 1, 1}, - {&__pyx_n_s_parse, __pyx_k_parse, sizeof(__pyx_k_parse), 0, 0, 1, 1}, - {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1}, - {&__pyx_n_s_pathlib, __pyx_k_pathlib, sizeof(__pyx_k_pathlib), 0, 0, 1, 1}, - {&__pyx_n_s_pattern, __pyx_k_pattern, sizeof(__pyx_k_pattern), 0, 0, 1, 1}, - {&__pyx_n_s_pd, __pyx_k_pd, sizeof(__pyx_k_pd), 0, 0, 1, 1}, - {&__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils), 0, 0, 1, 0}, - {&__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2), 0, 0, 1, 1}, - {&__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3), 0, 0, 1, 1}, - {&__pyx_n_s_pi, __pyx_k_pi, sizeof(__pyx_k_pi), 0, 0, 1, 1}, - {&__pyx_n_s_pinned, __pyx_k_pinned, sizeof(__pyx_k_pinned), 0, 0, 1, 1}, - {&__pyx_n_s_platform, __pyx_k_platform, sizeof(__pyx_k_platform), 0, 0, 1, 1}, - {&__pyx_n_s_precision, __pyx_k_precision, sizeof(__pyx_k_precision), 0, 0, 1, 1}, - {&__pyx_n_s_prediction, __pyx_k_prediction, sizeof(__pyx_k_prediction), 0, 0, 1, 1}, - {&__pyx_n_s_print, __pyx_k_print, sizeof(__pyx_k_print), 0, 0, 1, 1}, - {&__pyx_n_s_print_args, __pyx_k_print_args, sizeof(__pyx_k_print_args), 0, 0, 1, 1}, - {&__pyx_n_s_print_args_locals_genexpr, __pyx_k_print_args_locals_genexpr, sizeof(__pyx_k_print_args_locals_genexpr), 0, 0, 1, 1}, - {&__pyx_n_s_profile, __pyx_k_profile, sizeof(__pyx_k_profile), 0, 0, 1, 1}, - {&__pyx_n_s_python_version, __pyx_k_python_version, sizeof(__pyx_k_python_version), 0, 0, 1, 1}, - {&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1}, - {&__pyx_n_s_range, __pyx_k_range, 
sizeof(__pyx_k_range), 0, 0, 1, 1}, - {&__pyx_n_s_rank, __pyx_k_rank, sizeof(__pyx_k_rank), 0, 0, 1, 1}, - {&__pyx_n_s_ratio_pad, __pyx_k_ratio_pad, sizeof(__pyx_k_ratio_pad), 0, 0, 1, 1}, - {&__pyx_n_s_re, __pyx_k_re, sizeof(__pyx_k_re), 0, 0, 1, 1}, - {&__pyx_n_s_recursive, __pyx_k_recursive, sizeof(__pyx_k_recursive), 0, 0, 1, 1}, - {&__pyx_n_u_red, __pyx_k_red, sizeof(__pyx_k_red), 0, 1, 0, 1}, - {&__pyx_n_s_redundant, __pyx_k_redundant, sizeof(__pyx_k_redundant), 0, 0, 1, 1}, - {&__pyx_n_s_repl, __pyx_k_repl, sizeof(__pyx_k_repl), 0, 0, 1, 1}, - {&__pyx_n_s_replace, __pyx_k_replace, sizeof(__pyx_k_replace), 0, 0, 1, 1}, - {&__pyx_n_s_reshape, __pyx_k_reshape, sizeof(__pyx_k_reshape), 0, 0, 1, 1}, - {&__pyx_n_s_resolve, __pyx_k_resolve, sizeof(__pyx_k_resolve), 0, 0, 1, 1}, - {&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, - {&__pyx_kp_u_s_2, __pyx_k_s_2, sizeof(__pyx_k_s_2), 0, 1, 0, 0}, - {&__pyx_kp_u_s_exceeded, __pyx_k_s_exceeded, sizeof(__pyx_k_s_exceeded), 0, 1, 0, 0}, - {&__pyx_n_s_scale_coords, __pyx_k_scale_coords, sizeof(__pyx_k_scale_coords), 0, 0, 1, 1}, - {&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1}, - {&__pyx_n_s_search, __pyx_k_search, sizeof(__pyx_k_search), 0, 0, 1, 1}, - {&__pyx_n_s_search_dir, __pyx_k_search_dir, sizeof(__pyx_k_search_dir), 0, 0, 1, 1}, - {&__pyx_n_s_seed, __pyx_k_seed, sizeof(__pyx_k_seed), 0, 0, 1, 1}, - {&__pyx_n_s_send, __pyx_k_send, sizeof(__pyx_k_send), 0, 0, 1, 1}, - {&__pyx_n_s_sep, __pyx_k_sep, sizeof(__pyx_k_sep), 0, 0, 1, 1}, - {&__pyx_n_s_setNumThreads, __pyx_k_setNumThreads, sizeof(__pyx_k_setNumThreads), 0, 0, 1, 1}, - {&__pyx_n_s_set_logging, __pyx_k_set_logging, sizeof(__pyx_k_set_logging), 0, 0, 1, 1}, - {&__pyx_n_s_set_printoptions, __pyx_k_set_printoptions, sizeof(__pyx_k_set_printoptions), 0, 0, 1, 1}, - {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {&__pyx_n_s_shutil, __pyx_k_shutil, sizeof(__pyx_k_shutil), 0, 0, 1, 1}, - {&__pyx_n_s_spec, __pyx_k_spec, sizeof(__pyx_k_spec), 0, 0, 1, 1}, - {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1}, - {&__pyx_n_s_st_size, __pyx_k_st_size, sizeof(__pyx_k_st_size), 0, 0, 1, 1}, - {&__pyx_n_s_startswith, __pyx_k_startswith, sizeof(__pyx_k_startswith), 0, 0, 1, 1}, - {&__pyx_n_s_stat, __pyx_k_stat, sizeof(__pyx_k_stat), 0, 0, 1, 1}, - {&__pyx_n_s_stem, __pyx_k_stem, sizeof(__pyx_k_stem), 0, 0, 1, 1}, - {&__pyx_n_s_steps, __pyx_k_steps, sizeof(__pyx_k_steps), 0, 0, 1, 1}, - {&__pyx_n_s_str, __pyx_k_str, sizeof(__pyx_k_str), 0, 0, 1, 1}, - {&__pyx_n_s_string, __pyx_k_string, sizeof(__pyx_k_string), 0, 0, 1, 1}, - {&__pyx_n_s_sub, __pyx_k_sub, sizeof(__pyx_k_sub), 0, 0, 1, 1}, - {&__pyx_n_s_suffix, __pyx_k_suffix, sizeof(__pyx_k_suffix), 0, 0, 1, 1}, - {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1}, - {&__pyx_n_s_system, __pyx_k_system, sizeof(__pyx_k_system), 0, 0, 1, 1}, - {&__pyx_n_s_t, __pyx_k_t, sizeof(__pyx_k_t), 0, 0, 1, 1}, - {&__pyx_n_s_tensor, __pyx_k_tensor, sizeof(__pyx_k_tensor), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_n_s_test_2, __pyx_k_test_2, sizeof(__pyx_k_test_2), 0, 0, 1, 1}, - {&__pyx_n_s_throw, __pyx_k_throw, sizeof(__pyx_k_throw), 0, 0, 1, 1}, - {&__pyx_n_s_time, __pyx_k_time, sizeof(__pyx_k_time), 0, 0, 1, 1}, - {&__pyx_n_s_time_limit, __pyx_k_time_limit, sizeof(__pyx_k_time_limit), 0, 0, 1, 1}, - {&__pyx_kp_u_tmp, __pyx_k_tmp, sizeof(__pyx_k_tmp), 0, 1, 0, 0}, - {&__pyx_kp_u_tmp_txt, __pyx_k_tmp_txt, sizeof(__pyx_k_tmp_txt), 
0, 1, 0, 0}, - {&__pyx_n_s_torch, __pyx_k_torch, sizeof(__pyx_k_torch), 0, 0, 1, 1}, - {&__pyx_n_s_torch_backends_cudnn, __pyx_k_torch_backends_cudnn, sizeof(__pyx_k_torch_backends_cudnn), 0, 0, 1, 1}, - {&__pyx_n_s_torchvision, __pyx_k_torchvision, sizeof(__pyx_k_torchvision), 0, 0, 1, 1}, - {&__pyx_n_u_true, __pyx_k_true, sizeof(__pyx_k_true), 0, 1, 0, 1}, - {&__pyx_n_s_try_except, __pyx_k_try_except, sizeof(__pyx_k_try_except), 0, 0, 1, 1}, - {&__pyx_n_s_try_except_locals_handler, __pyx_k_try_except_locals_handler, sizeof(__pyx_k_try_except_locals_handler), 0, 0, 1, 1}, - {&__pyx_n_u_underline, __pyx_k_underline, sizeof(__pyx_k_underline), 0, 1, 0, 1}, - {&__pyx_n_s_unlink, __pyx_k_unlink, sizeof(__pyx_k_unlink), 0, 0, 1, 1}, - {&__pyx_n_s_unquote, __pyx_k_unquote, sizeof(__pyx_k_unquote), 0, 0, 1, 1}, - {&__pyx_kp_u_updating_to, __pyx_k_updating_to, sizeof(__pyx_k_updating_to), 0, 1, 0, 0}, - {&__pyx_n_s_url, __pyx_k_url, sizeof(__pyx_k_url), 0, 0, 1, 1}, - {&__pyx_n_s_url2file, __pyx_k_url2file, sizeof(__pyx_k_url2file), 0, 0, 1, 1}, - {&__pyx_n_s_urllib, __pyx_k_urllib, sizeof(__pyx_k_urllib), 0, 0, 1, 1}, - {&__pyx_n_s_user_config_dir, __pyx_k_user_config_dir, sizeof(__pyx_k_user_config_dir), 0, 0, 1, 1}, - {&__pyx_n_s_v, __pyx_k_v, sizeof(__pyx_k_v), 0, 0, 1, 1}, - {&__pyx_kp_u_valid_values_are_between_0_0_an, __pyx_k_valid_values_are_between_0_0_an, sizeof(__pyx_k_valid_values_are_between_0_0_an), 0, 1, 0, 0}, - {&__pyx_n_s_vars, __pyx_k_vars, sizeof(__pyx_k_vars), 0, 0, 1, 1}, - {&__pyx_n_s_verbose, __pyx_k_verbose, sizeof(__pyx_k_verbose), 0, 0, 1, 1}, - {&__pyx_kp_u_version, __pyx_k_version, sizeof(__pyx_k_version), 0, 1, 0, 0}, - {&__pyx_n_s_view, __pyx_k_view, sizeof(__pyx_k_view), 0, 0, 1, 1}, - {&__pyx_n_s_w, __pyx_k_w, sizeof(__pyx_k_w), 0, 0, 1, 1}, - {&__pyx_n_u_w, __pyx_k_w, sizeof(__pyx_k_w), 0, 1, 0, 1}, - {&__pyx_n_s_warning, __pyx_k_warning, sizeof(__pyx_k_warning), 0, 0, 1, 1}, - {&__pyx_n_s_weights, __pyx_k_weights, sizeof(__pyx_k_weights), 0, 0, 1, 1}, - {&__pyx_n_u_white, __pyx_k_white, sizeof(__pyx_k_white), 0, 1, 0, 1}, - {&__pyx_n_s_with_suffix, __pyx_k_with_suffix, sizeof(__pyx_k_with_suffix), 0, 0, 1, 1}, - {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, - {&__pyx_n_s_xc, __pyx_k_xc, sizeof(__pyx_k_xc), 0, 0, 1, 1}, - {&__pyx_n_s_xi, __pyx_k_xi, sizeof(__pyx_k_xi), 0, 0, 1, 1}, - {&__pyx_n_s_xyn2xy, __pyx_k_xyn2xy, sizeof(__pyx_k_xyn2xy), 0, 0, 1, 1}, - {&__pyx_n_s_xywh2xyxy, __pyx_k_xywh2xyxy, sizeof(__pyx_k_xywh2xyxy), 0, 0, 1, 1}, - {&__pyx_n_s_xywhn2xyxy, __pyx_k_xywhn2xyxy, sizeof(__pyx_k_xywhn2xyxy), 0, 0, 1, 1}, - {&__pyx_n_s_xyxy2xywh, __pyx_k_xyxy2xywh, sizeof(__pyx_k_xyxy2xywh), 0, 0, 1, 1}, - {&__pyx_n_s_xyxy2xywhn, __pyx_k_xyxy2xywhn, sizeof(__pyx_k_xyxy2xywhn), 0, 0, 1, 1}, - {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, - {&__pyx_n_s_y1, __pyx_k_y1, sizeof(__pyx_k_y1), 0, 0, 1, 1}, - {&__pyx_n_s_y2, __pyx_k_y2, sizeof(__pyx_k_y2), 0, 0, 1, 1}, - {&__pyx_n_s_yaml, __pyx_k_yaml, sizeof(__pyx_k_yaml), 0, 0, 1, 1}, - {&__pyx_n_u_yellow, __pyx_k_yellow, sizeof(__pyx_k_yellow), 0, 1, 0, 1}, - {&__pyx_n_u_yolov5, __pyx_k_yolov5, sizeof(__pyx_k_yolov5), 0, 1, 0, 1}, - {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, - #endif - {0, 0, 0, 0, 0, 0, 0} -}; -/* #### Code section: cached_builtins ### */ -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_print = __Pyx_GetBuiltinName(__pyx_n_s_print); if (!__pyx_builtin_print) __PYX_ERR(0, 55, __pyx_L1_error) - __pyx_builtin_vars = 
__Pyx_GetBuiltinName(__pyx_n_s_vars); if (!__pyx_builtin_vars) __PYX_ERR(0, 67, __pyx_L1_error) - __pyx_builtin_max = __Pyx_GetBuiltinName(__pyx_n_s_max); if (!__pyx_builtin_max) __PYX_ERR(0, 88, __pyx_L1_error) - __pyx_builtin_open = __Pyx_GetBuiltinName(__pyx_n_s_open); if (!__pyx_builtin_open) __PYX_ERR(0, 109, __pyx_L1_error) - __pyx_builtin_OSError = __Pyx_GetBuiltinName(__pyx_n_s_OSError); if (!__pyx_builtin_OSError) __PYX_ERR(0, 113, __pyx_L1_error) - __pyx_builtin_sum = __Pyx_GetBuiltinName(__pyx_n_s_sum); if (!__pyx_builtin_sum) __PYX_ERR(0, 141, __pyx_L1_error) - __pyx_builtin_AssertionError = __Pyx_GetBuiltinName(__pyx_n_s_AssertionError); if (!__pyx_builtin_AssertionError) __PYX_ERR(0, 333, __pyx_L1_error) - __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(0, 346, __pyx_L1_error) - __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 357, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: cached_constants ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":42 - * def set_logging(name=None, verbose=VERBOSE): - * # Sets level and returns logger - * rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings # <<<<<<<<<<<<<< - * logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) - * return logging.getLogger(name) - */ - __pyx_tuple_ = PyTuple_Pack(2, __pyx_n_u_RANK, __pyx_int_neg_1); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 42, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple_); - __Pyx_GIVEREF(__pyx_tuple_); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":51 - * def try_except(func): - * # try-except function. 
Usage: @try_except decorator - * def handler(*args, **kwargs): # <<<<<<<<<<<<<< - * try: - * func(*args, **kwargs) - */ - __pyx_tuple__2 = PyTuple_Pack(3, __pyx_n_s_args, __pyx_n_s_kwargs, __pyx_n_s_e); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 51, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__2); - __Pyx_GIVEREF(__pyx_tuple__2); - __pyx_codeobj__3 = (PyObject*)__Pyx_PyCode_New(0, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARARGS|CO_VARKEYWORDS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__2, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_handler, 51, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__3)) __PYX_ERR(0, 51, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":73 - * # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - * # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible - * import torch.backends.cudnn as cudnn # <<<<<<<<<<<<<< - * random.seed(seed) - * np.random.seed(seed) - */ - __pyx_tuple__8 = PyTuple_Pack(3, __pyx_n_s_torch, __pyx_n_s_backends, __pyx_n_s_cudnn); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__8); - __Pyx_GIVEREF(__pyx_tuple__8); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":109 - * file = Path(dir) / 'tmp.txt' - * try: - * with open(file, 'w'): # open file with write permissions # <<<<<<<<<<<<<< - * pass - * file.unlink() # remove file - */ - __pyx_tuple__12 = PyTuple_Pack(3, Py_None, Py_None, Py_None); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__12); - __Pyx_GIVEREF(__pyx_tuple__12); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":122 - * # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - * s = str(s) # convert list, tuple, None, etc. to str - * return len(s.encode().decode('ascii', 'ignore')) == len(s) # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__13 = PyTuple_Pack(2, __pyx_n_u_ascii, __pyx_n_u_ignore); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 122, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__13); - __Pyx_GIVEREF(__pyx_tuple__13); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":169 - * def url2file(url): - * # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt - * url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ # <<<<<<<<<<<<<< - * file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth - * return file - */ - __pyx_tuple__19 = PyTuple_Pack(2, __pyx_kp_u__17, __pyx_kp_u__18); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 169, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__19); - __Pyx_GIVEREF(__pyx_tuple__19); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":222 - * - * labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - * classes = labels[:, 0].astype(np.int) # labels = [class xywh] # <<<<<<<<<<<<<< - * weights = np.bincount(classes, minlength=nc) # occurrences per class - * - */ - __pyx_slice__23 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__23)) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__23); - __Pyx_GIVEREF(__pyx_slice__23); - __pyx_tuple__24 = PyTuple_Pack(2, __pyx_slice__23, __pyx_int_0); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 222, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__24); - __Pyx_GIVEREF(__pyx_tuple__24); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":246 - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center # <<<<<<<<<<<<<< - * y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - * y[:, 2] = x[:, 2] - x[:, 0] # width - */ - __pyx_tuple__25 = PyTuple_Pack(2, __pyx_slice__23, __pyx_int_2); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 246, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__25); - __Pyx_GIVEREF(__pyx_tuple__25); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":247 - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - * y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - * y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center # <<<<<<<<<<<<<< - * y[:, 2] = x[:, 2] - x[:, 0] # width - * y[:, 3] = x[:, 3] - x[:, 1] # height - */ - __pyx_tuple__26 = PyTuple_Pack(2, __pyx_slice__23, __pyx_int_1); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 247, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__26); - __Pyx_GIVEREF(__pyx_tuple__26); - __pyx_tuple__27 = PyTuple_Pack(2, __pyx_slice__23, __pyx_int_3); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(0, 247, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__27); - __Pyx_GIVEREF(__pyx_tuple__27); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":304 - * coords[:, [0, 2]] -= pad[0] # x padding - * coords[:, [1, 3]] -= pad[1] # y padding - * coords[:, :4] /= gain # <<<<<<<<<<<<<< - * clip_coords(coords, img0_shape) - * return coords - */ - __pyx_slice__28 = PySlice_New(Py_None, __pyx_int_4, Py_None); if (unlikely(!__pyx_slice__28)) __PYX_ERR(0, 304, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__28); - __Pyx_GIVEREF(__pyx_slice__28); - __pyx_tuple__29 = PyTuple_Pack(2, __pyx_slice__23, __pyx_slice__28); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 304, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__29); - __Pyx_GIVEREF(__pyx_tuple__29); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":330 - * - * nc = prediction.shape[2] - 5 # number of classes - * xc = prediction[..., 4] > conf_thres # candidates # <<<<<<<<<<<<<< - * - * # Checks - */ - __pyx_tuple__30 = PyTuple_Pack(2, Py_Ellipsis, __pyx_int_4); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(0, 330, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__30); - __Pyx_GIVEREF(__pyx_tuple__30); - - /* 
"pdf_toolbox/lib/dia_yolov5/utils/general.py":345 - * - * t = time.time() - * output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] # <<<<<<<<<<<<<< - * for xi, x in enumerate(prediction): # image index, image inference - * # Apply constraints - */ - __pyx_tuple__31 = PyTuple_Pack(2, __pyx_int_0, __pyx_int_6); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__31); - __Pyx_GIVEREF(__pyx_tuple__31); - __pyx_tuple__32 = PyTuple_Pack(1, __pyx_tuple__31); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(0, 345, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__32); - __Pyx_GIVEREF(__pyx_tuple__32); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":355 - * l = labels[xi] - * v = torch.zeros((len(l), nc + 5), device=x.device) - * v[:, :4] = l[:, 1:5] # box # <<<<<<<<<<<<<< - * v[:, 4] = 1.0 # conf - * v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls - */ - __pyx_slice__33 = PySlice_New(__pyx_int_1, __pyx_int_5, Py_None); if (unlikely(!__pyx_slice__33)) __PYX_ERR(0, 355, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__33); - __Pyx_GIVEREF(__pyx_slice__33); - __pyx_tuple__34 = PyTuple_Pack(2, __pyx_slice__23, __pyx_slice__33); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(0, 355, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__34); - __Pyx_GIVEREF(__pyx_tuple__34); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":356 - * v = torch.zeros((len(l), nc + 5), device=x.device) - * v[:, :4] = l[:, 1:5] # box - * v[:, 4] = 1.0 # conf # <<<<<<<<<<<<<< - * v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls - * x = torch.cat((x, v), 0) - */ - __pyx_tuple__35 = PyTuple_Pack(2, __pyx_slice__23, __pyx_int_4); if (unlikely(!__pyx_tuple__35)) __PYX_ERR(0, 356, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__35); - __Pyx_GIVEREF(__pyx_tuple__35); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":365 - * - * # Compute conf - * x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf # <<<<<<<<<<<<<< - * - * # Box (center x, center y, width, height) to (x1, y1, x2, y2) - */ - __pyx_slice__36 = PySlice_New(__pyx_int_5, Py_None, Py_None); if (unlikely(!__pyx_slice__36)) __PYX_ERR(0, 365, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__36); - __Pyx_GIVEREF(__pyx_slice__36); - __pyx_tuple__37 = PyTuple_Pack(2, __pyx_slice__23, __pyx_slice__36); if (unlikely(!__pyx_tuple__37)) __PYX_ERR(0, 365, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__37); - __Pyx_GIVEREF(__pyx_tuple__37); - __pyx_slice__38 = PySlice_New(__pyx_int_4, __pyx_int_5, Py_None); if (unlikely(!__pyx_slice__38)) __PYX_ERR(0, 365, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__38); - __Pyx_GIVEREF(__pyx_slice__38); - __pyx_tuple__39 = PyTuple_Pack(2, __pyx_slice__23, __pyx_slice__38); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(0, 365, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__39); - __Pyx_GIVEREF(__pyx_tuple__39); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":373 - * if multi_label: - * i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - * x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) # <<<<<<<<<<<<<< - * else: # best class only - * conf, j = x[:, 5:].max(1, keepdim=True) - */ - __pyx_tuple__40 = PyTuple_Pack(2, __pyx_slice__23, Py_None); if (unlikely(!__pyx_tuple__40)) __PYX_ERR(0, 373, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__40); - __Pyx_GIVEREF(__pyx_tuple__40); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":375 - * x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - * else: # best class only - * conf, j = x[:, 5:].max(1, keepdim=True) # <<<<<<<<<<<<<< - * x = 
torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] - * - */ - __pyx_tuple__41 = PyTuple_Pack(1, __pyx_int_1); if (unlikely(!__pyx_tuple__41)) __PYX_ERR(0, 375, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__41); - __Pyx_GIVEREF(__pyx_tuple__41); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":380 - * # Filter by class - * if classes is not None: - * x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] # <<<<<<<<<<<<<< - * - * # Apply finite constraint - */ - __pyx_slice__42 = PySlice_New(__pyx_int_5, __pyx_int_6, Py_None); if (unlikely(!__pyx_slice__42)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__42); - __Pyx_GIVEREF(__pyx_slice__42); - __pyx_tuple__43 = PyTuple_Pack(2, __pyx_slice__23, __pyx_slice__42); if (unlikely(!__pyx_tuple__43)) __PYX_ERR(0, 380, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__43); - __Pyx_GIVEREF(__pyx_tuple__43); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":31 - * ROOT = FILE.parents[1] # YOLOv5 root directory - * NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads - * VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode # <<<<<<<<<<<<<< - * - * torch.set_printoptions(linewidth=320, precision=5, profile='long') - */ - __pyx_tuple__44 = PyTuple_Pack(2, __pyx_n_u_YOLOv5_VERBOSE, Py_True); if (unlikely(!__pyx_tuple__44)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__44); - __Pyx_GIVEREF(__pyx_tuple__44); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":36 - * np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 - * pd.options.display.max_columns = 10 - * cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) # <<<<<<<<<<<<<< - * os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads - * - */ - __pyx_tuple__45 = PyTuple_Pack(1, __pyx_int_0); if (unlikely(!__pyx_tuple__45)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__45); - __Pyx_GIVEREF(__pyx_tuple__45); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":40 - * - * - * def set_logging(name=None, verbose=VERBOSE): # <<<<<<<<<<<<<< - * # Sets level and returns logger - * rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - */ - __pyx_tuple__46 = PyTuple_Pack(3, __pyx_n_s_name, __pyx_n_s_verbose, __pyx_n_s_rank); if (unlikely(!__pyx_tuple__46)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__46); - __Pyx_GIVEREF(__pyx_tuple__46); - __pyx_codeobj__47 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__46, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_set_logging, 40, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__47)) __PYX_ERR(0, 40, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":47 - * - * - * LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) # <<<<<<<<<<<<<< - * - * def try_except(func): - */ - __pyx_tuple__48 = PyTuple_Pack(1, __pyx_n_u_yolov5); if (unlikely(!__pyx_tuple__48)) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__48); - __Pyx_GIVEREF(__pyx_tuple__48); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":49 - * LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) - * - * def try_except(func): # <<<<<<<<<<<<<< - * # try-except function. 
Usage: @try_except decorator - * def handler(*args, **kwargs): - */ - __pyx_tuple__49 = PyTuple_Pack(3, __pyx_n_s_func, __pyx_n_s_handler, __pyx_n_s_handler); if (unlikely(!__pyx_tuple__49)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__49); - __Pyx_GIVEREF(__pyx_tuple__49); - __pyx_codeobj__50 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__49, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_try_except, 49, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__50)) __PYX_ERR(0, 49, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":60 - * - * - * def methods(instance): # <<<<<<<<<<<<<< - * # Get class/instance methods - * return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] - */ - __pyx_tuple__51 = PyTuple_Pack(2, __pyx_n_s_instance, __pyx_n_s_f); if (unlikely(!__pyx_tuple__51)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__51); - __Pyx_GIVEREF(__pyx_tuple__51); - __pyx_codeobj__52 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__51, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_methods, 60, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__52)) __PYX_ERR(0, 60, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":65 - * - * - * def print_args(name, opt): # <<<<<<<<<<<<<< - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - */ - __pyx_tuple__53 = PyTuple_Pack(4, __pyx_n_s_name, __pyx_n_s_opt, __pyx_n_s_genexpr, __pyx_n_s_genexpr); if (unlikely(!__pyx_tuple__53)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__53); - __Pyx_GIVEREF(__pyx_tuple__53); - __pyx_codeobj__54 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__53, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_print_args, 65, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__54)) __PYX_ERR(0, 65, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":70 - * - * - * def init_seeds(seed=0): # <<<<<<<<<<<<<< - * # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - * # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible - */ - __pyx_tuple__55 = PyTuple_Pack(2, __pyx_n_s_seed, __pyx_n_s_cudnn); if (unlikely(!__pyx_tuple__55)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__55); - __Pyx_GIVEREF(__pyx_tuple__55); - __pyx_codeobj__56 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__55, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_init_seeds, 70, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__56)) __PYX_ERR(0, 70, __pyx_L1_error) - __pyx_tuple__57 = PyTuple_Pack(1, ((PyObject *)__pyx_int_0)); if (unlikely(!__pyx_tuple__57)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__57); - __Pyx_GIVEREF(__pyx_tuple__57); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":80 - * - * - * def intersect_dicts(da, db, exclude=()): # <<<<<<<<<<<<<< - * # Dictionary intersection of matching keys and 
shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - */ - __pyx_tuple__58 = PyTuple_Pack(7, __pyx_n_s_da, __pyx_n_s_db, __pyx_n_s_exclude, __pyx_n_s_k, __pyx_n_s_v, __pyx_n_s_genexpr, __pyx_n_s_genexpr); if (unlikely(!__pyx_tuple__58)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__58); - __Pyx_GIVEREF(__pyx_tuple__58); - __pyx_codeobj__59 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__58, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_intersect_dicts, 80, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__59)) __PYX_ERR(0, 80, __pyx_L1_error) - __pyx_tuple__60 = PyTuple_Pack(1, ((PyObject*)__pyx_empty_tuple)); if (unlikely(!__pyx_tuple__60)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__60); - __Pyx_GIVEREF(__pyx_tuple__60); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":85 - * - * - * def get_latest_run(search_dir='.'): # <<<<<<<<<<<<<< - * # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) - * last_list = glob.glob(f'{search_dir}/[inserted by cython to avoid comment start]**[inserted by cython to avoid comment closer]/last*.pt', recursive=True) - */ - __pyx_tuple__61 = PyTuple_Pack(2, __pyx_n_s_search_dir, __pyx_n_s_last_list); if (unlikely(!__pyx_tuple__61)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__61); - __Pyx_GIVEREF(__pyx_tuple__61); - __pyx_codeobj__62 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__61, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_get_latest_run, 85, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__62)) __PYX_ERR(0, 85, __pyx_L1_error) - __pyx_tuple__63 = PyTuple_Pack(1, ((PyObject*)__pyx_kp_u__10)); if (unlikely(!__pyx_tuple__63)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__63); - __Pyx_GIVEREF(__pyx_tuple__63); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":91 - * - * - * def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): # <<<<<<<<<<<<<< - * # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
- * env = os.getenv(env_var) - */ - __pyx_tuple__64 = PyTuple_Pack(5, __pyx_n_s_dir, __pyx_n_s_env_var, __pyx_n_s_env, __pyx_n_s_path, __pyx_n_s_cfg); if (unlikely(!__pyx_tuple__64)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__64); - __Pyx_GIVEREF(__pyx_tuple__64); - __pyx_codeobj__65 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__64, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_user_config_dir, 91, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__65)) __PYX_ERR(0, 91, __pyx_L1_error) - __pyx_tuple__66 = PyTuple_Pack(2, ((PyObject*)__pyx_n_u_Ultralytics), ((PyObject*)__pyx_n_u_YOLOV5_CONFIG_DIR)); if (unlikely(!__pyx_tuple__66)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__66); - __Pyx_GIVEREF(__pyx_tuple__66); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":104 - * - * - * def is_writeable(dir, test=False): # <<<<<<<<<<<<<< - * # Return True if directory has write permissions, test opening a file with write permissions if test=True - * if test: # method 1 - */ - __pyx_tuple__67 = PyTuple_Pack(3, __pyx_n_s_dir, __pyx_n_s_test, __pyx_n_s_file_2); if (unlikely(!__pyx_tuple__67)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__67); - __Pyx_GIVEREF(__pyx_tuple__67); - __pyx_codeobj__68 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__67, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_is_writeable, 104, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__68)) __PYX_ERR(0, 104, __pyx_L1_error) - __pyx_tuple__69 = PyTuple_Pack(1, ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__69)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__69); - __Pyx_GIVEREF(__pyx_tuple__69); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":119 - * - * - * def is_ascii(s=''): # <<<<<<<<<<<<<< - * # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - * s = str(s) # convert list, tuple, None, etc. to str - */ - __pyx_tuple__70 = PyTuple_Pack(1, __pyx_n_s_s); if (unlikely(!__pyx_tuple__70)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__70); - __Pyx_GIVEREF(__pyx_tuple__70); - __pyx_codeobj__71 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__70, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_is_ascii, 119, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__71)) __PYX_ERR(0, 119, __pyx_L1_error) - __pyx_tuple__72 = PyTuple_Pack(1, ((PyObject*)__pyx_kp_u__11)); if (unlikely(!__pyx_tuple__72)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__72); - __Pyx_GIVEREF(__pyx_tuple__72); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":125 - * - * - * def is_chinese(s=''): # <<<<<<<<<<<<<< - * # Is string composed of any Chinese characters? 
- * return re.search('[\u4e00-\u9fff]', s) - */ - __pyx_codeobj__73 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__70, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_is_chinese, 125, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__73)) __PYX_ERR(0, 125, __pyx_L1_error) - __pyx_tuple__74 = PyTuple_Pack(1, ((PyObject*)__pyx_n_u__14)); if (unlikely(!__pyx_tuple__74)) __PYX_ERR(0, 125, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__74); - __Pyx_GIVEREF(__pyx_tuple__74); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":130 - * - * - * def emojis(str=''): # <<<<<<<<<<<<<< - * # Return platform-dependent emoji-safe version of string - * return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - */ - __pyx_tuple__75 = PyTuple_Pack(1, __pyx_n_s_str); if (unlikely(!__pyx_tuple__75)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__75); - __Pyx_GIVEREF(__pyx_tuple__75); - __pyx_codeobj__76 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__75, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_emojis, 130, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__76)) __PYX_ERR(0, 130, __pyx_L1_error) - __pyx_tuple__77 = PyTuple_Pack(1, ((PyObject*)__pyx_kp_u__11)); if (unlikely(!__pyx_tuple__77)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__77); - __Pyx_GIVEREF(__pyx_tuple__77); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":135 - * - * - * def file_size(path): # <<<<<<<<<<<<<< - * # Return file/dir size (MB) - * path = Path(path) - */ - __pyx_tuple__78 = PyTuple_Pack(3, __pyx_n_s_path, __pyx_n_s_genexpr, __pyx_n_s_genexpr); if (unlikely(!__pyx_tuple__78)) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__78); - __Pyx_GIVEREF(__pyx_tuple__78); - __pyx_codeobj__79 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__78, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_file_size, 135, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__79)) __PYX_ERR(0, 135, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":146 - * - * - * def check_python(minimum='3.6.2'): # <<<<<<<<<<<<<< - * # Check current python version vs. 
required python version - * check_version(platform.python_version(), minimum, name='Python ', hard=True) - */ - __pyx_tuple__80 = PyTuple_Pack(1, __pyx_n_s_minimum); if (unlikely(!__pyx_tuple__80)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__80); - __Pyx_GIVEREF(__pyx_tuple__80); - __pyx_codeobj__81 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__80, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_check_python, 146, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__81)) __PYX_ERR(0, 146, __pyx_L1_error) - __pyx_tuple__82 = PyTuple_Pack(1, ((PyObject*)__pyx_kp_u_3_6_2)); if (unlikely(!__pyx_tuple__82)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__82); - __Pyx_GIVEREF(__pyx_tuple__82); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":151 - * - * - * def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # <<<<<<<<<<<<<< - * # Check version vs. required version - * return True - */ - __pyx_tuple__83 = PyTuple_Pack(6, __pyx_n_s_current, __pyx_n_s_minimum, __pyx_n_s_name, __pyx_n_s_pinned, __pyx_n_s_hard, __pyx_n_s_verbose); if (unlikely(!__pyx_tuple__83)) __PYX_ERR(0, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__83); - __Pyx_GIVEREF(__pyx_tuple__83); - __pyx_codeobj__84 = (PyObject*)__Pyx_PyCode_New(6, 0, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__83, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_check_version, 151, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__84)) __PYX_ERR(0, 151, __pyx_L1_error) - __pyx_tuple__85 = PyTuple_Pack(6, ((PyObject*)__pyx_kp_u_0_0_0), ((PyObject*)__pyx_kp_u_0_0_0), ((PyObject*)__pyx_kp_u_version), ((PyObject *)Py_False), ((PyObject *)Py_False), ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__85)) __PYX_ERR(0, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__85); - __Pyx_GIVEREF(__pyx_tuple__85); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":156 - * - * - * def check_img_size(imgsz, s=32, floor=0): # <<<<<<<<<<<<<< - * # Verify image size is a multiple of stride s in each dimension - * if isinstance(imgsz, int): # integer i.e. img_size=640 - */ - __pyx_tuple__86 = PyTuple_Pack(5, __pyx_n_s_imgsz, __pyx_n_s_s, __pyx_n_s_floor, __pyx_n_s_new_size, __pyx_n_s_x); if (unlikely(!__pyx_tuple__86)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__86); - __Pyx_GIVEREF(__pyx_tuple__86); - __pyx_codeobj__87 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__86, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_check_img_size, 156, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__87)) __PYX_ERR(0, 156, __pyx_L1_error) - __pyx_tuple__88 = PyTuple_Pack(2, ((PyObject *)__pyx_int_32), ((PyObject *)__pyx_int_0)); if (unlikely(!__pyx_tuple__88)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__88); - __Pyx_GIVEREF(__pyx_tuple__88); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":167 - * - * - * def url2file(url): # <<<<<<<<<<<<<< - * # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt - * url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - */ - __pyx_tuple__89 = PyTuple_Pack(2, __pyx_n_s_url, __pyx_n_s_file_2); if (unlikely(!__pyx_tuple__89)) __PYX_ERR(0, 167, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__89); - __Pyx_GIVEREF(__pyx_tuple__89); - __pyx_codeobj__90 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__89, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_url2file, 167, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__90)) __PYX_ERR(0, 167, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":174 - * - * - * def make_divisible(x, divisor): # <<<<<<<<<<<<<< - * # Returns nearest x divisible by divisor - * if isinstance(divisor, torch.Tensor): - */ - __pyx_tuple__91 = PyTuple_Pack(2, __pyx_n_s_x, __pyx_n_s_divisor); if (unlikely(!__pyx_tuple__91)) __PYX_ERR(0, 174, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__91); - __Pyx_GIVEREF(__pyx_tuple__91); - __pyx_codeobj__92 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__91, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_make_divisible, 174, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__92)) __PYX_ERR(0, 174, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":181 - * - * - * def clean_str(s): # <<<<<<<<<<<<<< - * # Cleans a string by replacing special characters with underscore _ - * return re.sub(pattern="[|@#!$%&()=?^*;:,><+]", repl="_", string=s) - */ - __pyx_codeobj__93 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__70, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_clean_str, 181, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__93)) __PYX_ERR(0, 181, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":186 - * - * - * def one_cycle(y1=0.0, y2=1.0, steps=100): # <<<<<<<<<<<<<< - * # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - * return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 - */ - __pyx_tuple__94 = PyTuple_Pack(3, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_steps); if (unlikely(!__pyx_tuple__94)) __PYX_ERR(0, 186, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__94); - __Pyx_GIVEREF(__pyx_tuple__94); - __pyx_codeobj__95 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__94, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_one_cycle, 186, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__95)) __PYX_ERR(0, 186, __pyx_L1_error) - __pyx_tuple__96 = PyTuple_Pack(3, ((PyObject*)__pyx_float_0_0), ((PyObject*)__pyx_float_1_0), ((PyObject *)__pyx_int_100)); if (unlikely(!__pyx_tuple__96)) __PYX_ERR(0, 186, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__96); - __Pyx_GIVEREF(__pyx_tuple__96); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":191 - * - * - * def colorstr(*input): # <<<<<<<<<<<<<< - * # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') - * *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - */ - __pyx_tuple__97 = PyTuple_Pack(6, __pyx_n_s_input, __pyx_n_s_args, __pyx_n_s_string, __pyx_n_s_colors, __pyx_n_s_genexpr, __pyx_n_s_genexpr); if (unlikely(!__pyx_tuple__97)) __PYX_ERR(0, 191, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__97); - __Pyx_GIVEREF(__pyx_tuple__97); - __pyx_codeobj__98 = (PyObject*)__Pyx_PyCode_New(0, 0, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARARGS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__97, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_colorstr, 191, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__98)) __PYX_ERR(0, 191, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":216 - * - * - * def labels_to_class_weights(labels, nc=80): # <<<<<<<<<<<<<< - * # Get class weights (inverse frequency) from training labels - * if labels[0] is None: # no labels loaded - */ - __pyx_tuple__99 = PyTuple_Pack(4, __pyx_n_s_labels, __pyx_n_s_nc, __pyx_n_s_classes, __pyx_n_s_weights); if (unlikely(!__pyx_tuple__99)) __PYX_ERR(0, 216, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__99); - __Pyx_GIVEREF(__pyx_tuple__99); - __pyx_codeobj__100 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__99, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_labels_to_class_weights, 216, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__100)) __PYX_ERR(0, 216, __pyx_L1_error) - __pyx_tuple__101 = PyTuple_Pack(1, ((PyObject *)__pyx_int_80)); if (unlikely(!__pyx_tuple__101)) __PYX_ERR(0, 216, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__101); - __Pyx_GIVEREF(__pyx_tuple__101); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":235 - * - * - * def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): # <<<<<<<<<<<<<< - * # Produces image weights based on class_weights and image contents - * class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) - */ - __pyx_tuple__102 = PyTuple_Pack(6, __pyx_n_s_labels, __pyx_n_s_nc, __pyx_n_s_class_weights, __pyx_n_s_class_counts, __pyx_n_s_image_weights, __pyx_n_s_x); if (unlikely(!__pyx_tuple__102)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__102); - __Pyx_GIVEREF(__pyx_tuple__102); - __pyx_codeobj__103 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__102, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_labels_to_image_weights, 235, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__103)) __PYX_ERR(0, 235, __pyx_L1_error) - __pyx_tuple__104 = PyTuple_Pack(1, __pyx_int_80); if (unlikely(!__pyx_tuple__104)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__104); - __Pyx_GIVEREF(__pyx_tuple__104); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":243 - * - * - * def xyxy2xywh(x): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_tuple__105 = PyTuple_Pack(2, __pyx_n_s_x, __pyx_n_s_y); if (unlikely(!__pyx_tuple__105)) __PYX_ERR(0, 243, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__105); - __Pyx_GIVEREF(__pyx_tuple__105); - __pyx_codeobj__106 = 
(PyObject*)__Pyx_PyCode_New(1, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__105, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_xyxy2xywh, 243, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__106)) __PYX_ERR(0, 243, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":253 - * - * - * def xywh2xyxy(x): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_codeobj__107 = (PyObject*)__Pyx_PyCode_New(1, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__105, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_xywh2xyxy, 253, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__107)) __PYX_ERR(0, 253, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":263 - * - * - * def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_tuple__108 = PyTuple_Pack(6, __pyx_n_s_x, __pyx_n_s_w, __pyx_n_s_h, __pyx_n_s_padw, __pyx_n_s_padh, __pyx_n_s_y); if (unlikely(!__pyx_tuple__108)) __PYX_ERR(0, 263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__108); - __Pyx_GIVEREF(__pyx_tuple__108); - __pyx_codeobj__109 = (PyObject*)__Pyx_PyCode_New(5, 0, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__108, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_xywhn2xyxy, 263, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__109)) __PYX_ERR(0, 263, __pyx_L1_error) - __pyx_tuple__110 = PyTuple_Pack(4, ((PyObject *)__pyx_int_640), ((PyObject *)__pyx_int_640), ((PyObject *)__pyx_int_0), ((PyObject *)__pyx_int_0)); if (unlikely(!__pyx_tuple__110)) __PYX_ERR(0, 263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__110); - __Pyx_GIVEREF(__pyx_tuple__110); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":273 - * - * - * def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - * if clip: - */ - __pyx_tuple__111 = PyTuple_Pack(6, __pyx_n_s_x, __pyx_n_s_w, __pyx_n_s_h, __pyx_n_s_clip, __pyx_n_s_eps, __pyx_n_s_y); if (unlikely(!__pyx_tuple__111)) __PYX_ERR(0, 273, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__111); - __Pyx_GIVEREF(__pyx_tuple__111); - __pyx_codeobj__112 = (PyObject*)__Pyx_PyCode_New(5, 0, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__111, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_xyxy2xywhn, 273, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__112)) __PYX_ERR(0, 273, __pyx_L1_error) - __pyx_tuple__113 = PyTuple_Pack(4, ((PyObject *)__pyx_int_640), ((PyObject *)__pyx_int_640), ((PyObject *)Py_False), ((PyObject*)__pyx_float_0_0)); if (unlikely(!__pyx_tuple__113)) __PYX_ERR(0, 273, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__113); - __Pyx_GIVEREF(__pyx_tuple__113); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":285 - * - * - * def xyn2xy(x, w=640, h=640, padw=0, padh=0): # <<<<<<<<<<<<<< - * # Convert normalized segments 
into pixel segments, shape (n,2) - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_codeobj__114 = (PyObject*)__Pyx_PyCode_New(5, 0, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__108, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_xyn2xy, 285, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__114)) __PYX_ERR(0, 285, __pyx_L1_error) - __pyx_tuple__115 = PyTuple_Pack(4, ((PyObject *)__pyx_int_640), ((PyObject *)__pyx_int_640), ((PyObject *)__pyx_int_0), ((PyObject *)__pyx_int_0)); if (unlikely(!__pyx_tuple__115)) __PYX_ERR(0, 285, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__115); - __Pyx_GIVEREF(__pyx_tuple__115); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":293 - * - * - * def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # <<<<<<<<<<<<<< - * # Rescale coords (xyxy) from img1_shape to img0_shape - * if ratio_pad is None: # calculate from img0_shape - */ - __pyx_tuple__116 = PyTuple_Pack(6, __pyx_n_s_img1_shape, __pyx_n_s_coords, __pyx_n_s_img0_shape, __pyx_n_s_ratio_pad, __pyx_n_s_gain, __pyx_n_s_pad); if (unlikely(!__pyx_tuple__116)) __PYX_ERR(0, 293, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__116); - __Pyx_GIVEREF(__pyx_tuple__116); - __pyx_codeobj__117 = (PyObject*)__Pyx_PyCode_New(4, 0, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__116, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_scale_coords, 293, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__117)) __PYX_ERR(0, 293, __pyx_L1_error) - __pyx_tuple__118 = PyTuple_Pack(1, ((PyObject *)Py_None)); if (unlikely(!__pyx_tuple__118)) __PYX_ERR(0, 293, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__118); - __Pyx_GIVEREF(__pyx_tuple__118); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":309 - * - * - * def clip_coords(boxes, shape): # <<<<<<<<<<<<<< - * # Clip bounding xyxy bounding boxes to image shape (height, width) - * if isinstance(boxes, torch.Tensor): # faster individually - */ - __pyx_tuple__119 = PyTuple_Pack(2, __pyx_n_s_boxes, __pyx_n_s_shape); if (unlikely(!__pyx_tuple__119)) __PYX_ERR(0, 309, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__119); - __Pyx_GIVEREF(__pyx_tuple__119); - __pyx_codeobj__120 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__119, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_clip_coords, 309, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__120)) __PYX_ERR(0, 309, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":321 - * - * - * def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, # <<<<<<<<<<<<<< - * labels=(), max_det=300): - * """Runs Non-Maximum Suppression (NMS) on inference results - */ - __pyx_tuple__121 = PyTuple_Pack(32, __pyx_n_s_prediction, __pyx_n_s_conf_thres, __pyx_n_s_iou_thres, __pyx_n_s_classes, __pyx_n_s_agnostic, __pyx_n_s_multi_label, __pyx_n_s_labels, __pyx_n_s_max_det, __pyx_n_s_nc, __pyx_n_s_xc, __pyx_n_s_min_wh, __pyx_n_s_max_wh, __pyx_n_s_max_nms, __pyx_n_s_time_limit, __pyx_n_s_redundant, __pyx_n_s_merge, __pyx_n_s_t, __pyx_n_s_output, __pyx_n_s_xi, __pyx_n_s_x, __pyx_n_s_l, __pyx_n_s_v, __pyx_n_s_box, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_conf, __pyx_n_s_n, __pyx_n_s_c, __pyx_n_s_boxes, __pyx_n_s_scores, 
__pyx_n_s_iou, __pyx_n_s_weights); if (unlikely(!__pyx_tuple__121)) __PYX_ERR(0, 321, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__121); - __Pyx_GIVEREF(__pyx_tuple__121); - __pyx_codeobj__122 = (PyObject*)__Pyx_PyCode_New(8, 0, 0, 32, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__121, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_non_max_suppression, 321, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__122)) __PYX_ERR(0, 321, __pyx_L1_error) - __pyx_tuple__123 = PyTuple_Pack(7, ((PyObject*)__pyx_float_0_25), ((PyObject*)__pyx_float_0_45), ((PyObject *)Py_None), ((PyObject *)Py_False), ((PyObject *)Py_False), ((PyObject*)__pyx_empty_tuple), ((PyObject *)__pyx_int_300)); if (unlikely(!__pyx_tuple__123)) __PYX_ERR(0, 321, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__123); - __Pyx_GIVEREF(__pyx_tuple__123); - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":415 - * - * - * def increment_path(path, exist_ok=False, sep='', mkdir=False): # <<<<<<<<<<<<<< - * # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. - * path = Path(path) # os-agnostic - */ - __pyx_tuple__124 = PyTuple_Pack(11, __pyx_n_s_path, __pyx_n_s_exist_ok, __pyx_n_s_sep, __pyx_n_s_mkdir, __pyx_n_s_suffix, __pyx_n_s_dirs, __pyx_n_s_matches, __pyx_n_s_i, __pyx_n_s_n, __pyx_n_s_d_2, __pyx_n_s_m); if (unlikely(!__pyx_tuple__124)) __PYX_ERR(0, 415, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__124); - __Pyx_GIVEREF(__pyx_tuple__124); - __pyx_codeobj__125 = (PyObject*)__Pyx_PyCode_New(4, 0, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__124, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_n_s_increment_path, 415, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__125)) __PYX_ERR(0, 415, __pyx_L1_error) - __pyx_tuple__126 = PyTuple_Pack(3, ((PyObject *)Py_False), ((PyObject*)__pyx_kp_u__11), ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__126)) __PYX_ERR(0, 415, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__126); - __Pyx_GIVEREF(__pyx_tuple__126); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} -/* #### Code section: init_constants ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitConstants(void) { - __pyx_umethod_PyDict_Type_get.type = (PyObject*)&PyDict_Type; - __pyx_umethod_PyDict_Type_get.method_name = &__pyx_n_s_get; - #if CYTHON_USE_MODULE_STATE - if (__Pyx_InitString(__pyx_string_tab[0], &__pyx_kp_u_0_0_0) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[1], &__pyx_kp_u_0m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[2], &__pyx_kp_u_11_5g) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[3], &__pyx_kp_u_1m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[4], &__pyx_kp_u_30m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[5], &__pyx_kp_u_31m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[6], &__pyx_kp_u_32m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[7], &__pyx_kp_u_33m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[8], &__pyx_kp_u_34m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[9], &__pyx_kp_u_35m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if 
(__Pyx_InitString(__pyx_string_tab[10], &__pyx_kp_u_36m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[11], &__pyx_kp_u_37m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[12], &__pyx_kp_u_3_6_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[13], &__pyx_kp_u_4m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[14], &__pyx_kp_u_90m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[15], &__pyx_kp_u_91m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[16], &__pyx_kp_u_92m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[17], &__pyx_kp_u_93m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[18], &__pyx_kp_u_94m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[19], &__pyx_kp_u_95m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[20], &__pyx_kp_u_96m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[21], &__pyx_kp_u_97m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[22], &__pyx_kp_u_AppData_Roaming) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[23], &__pyx_n_s_AssertionError) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[24], &__pyx_n_u_Darwin) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[25], &__pyx_n_s_FILE) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[26], &__pyx_n_s_INFO) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[27], &__pyx_kp_u_Invalid_Confidence_threshold) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[28], &__pyx_kp_u_Invalid_IoU) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[29], &__pyx_n_s_LOGGER) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[30], &__pyx_kp_u_Library_Application_Support) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[31], &__pyx_n_u_Linux) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[32], &__pyx_n_s_NCOLS) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[33], &__pyx_n_u_NUMEXPR_MAX_THREADS) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[34], &__pyx_n_s_NUM_THREADS) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[35], &__pyx_n_s_OSError) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[36], &__pyx_n_s_Path) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[37], &__pyx_kp_u_Python) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[38], &__pyx_n_u_RANK) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[39], &__pyx_n_s_ROOT) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[40], &__pyx_n_s_R_OK) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[41], &__pyx_n_s_T) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[42], &__pyx_n_s_Tensor) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[43], &__pyx_n_u_Ultralytics) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if 
(__Pyx_InitString(__pyx_string_tab[44], &__pyx_n_s_VERBOSE) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[45], &__pyx_n_s_WARNING) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[46], &__pyx_kp_u_WARNING_NMS_time_limit) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[47], &__pyx_kp_u_WARNING_img_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[48], &__pyx_n_u_Windows) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[49], &__pyx_n_u_YOLOV5_CONFIG_DIR) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[50], &__pyx_n_u_YOLOv5_VERBOSE) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[51], &__pyx_kp_u__10) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[52], &__pyx_kp_u__11) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[53], &__pyx_n_u__14) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[54], &__pyx_kp_u__15) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[55], &__pyx_kp_u__16) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[56], &__pyx_kp_u__17) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[57], &__pyx_kp_u__18) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[58], &__pyx_n_s__20) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[59], &__pyx_kp_u__20) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[60], &__pyx_kp_u__21) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[61], &__pyx_n_u__22) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[62], &__pyx_n_u__4) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[63], &__pyx_kp_u__5) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[64], &__pyx_kp_u__6) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[65], &__pyx_kp_u__7) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[66], &__pyx_n_s__9) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[67], &__pyx_kp_u__9) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[68], &__pyx_n_s_access) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[69], &__pyx_n_s_agnostic) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[70], &__pyx_n_s_any) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[71], &__pyx_n_s_args) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[72], &__pyx_n_s_argsort) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[73], &__pyx_n_s_array) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[74], &__pyx_n_s_as_tuple) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[75], &__pyx_n_u_ascii) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[76], &__pyx_n_s_astype) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[77], &__pyx_n_s_asyncio_coroutines) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[78], &__pyx_n_s_backends) < 0) 
__PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[79], &__pyx_n_s_basicConfig) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[80], &__pyx_n_s_benchmark) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[81], &__pyx_n_s_bincount) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[82], &__pyx_n_u_black) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[83], &__pyx_n_u_blue) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[84], &__pyx_n_u_bold) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[85], &__pyx_n_s_box) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[86], &__pyx_n_s_box_iou) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[87], &__pyx_n_s_boxes) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[88], &__pyx_n_u_bright_black) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[89], &__pyx_n_u_bright_blue) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[90], &__pyx_n_u_bright_cyan) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[91], &__pyx_n_u_bright_green) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[92], &__pyx_n_u_bright_magenta) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[93], &__pyx_n_u_bright_red) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[94], &__pyx_n_u_bright_white) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[95], &__pyx_n_u_bright_yellow) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[96], &__pyx_n_s_c) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[97], &__pyx_n_s_cat) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[98], &__pyx_n_s_ceil) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[99], &__pyx_n_s_cfg) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[100], &__pyx_n_s_check_img_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[101], &__pyx_n_s_check_python) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[102], &__pyx_n_s_check_version) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[103], &__pyx_n_s_clamp) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[104], &__pyx_n_s_class_counts) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[105], &__pyx_n_s_class_getitem) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[106], &__pyx_n_s_class_weights) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[107], &__pyx_n_s_classes) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[108], &__pyx_n_s_clean_str) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[109], &__pyx_n_s_cline_in_traceback) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[110], &__pyx_n_s_clip) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[111], &__pyx_n_s_clip_coords) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[112], 
&__pyx_n_s_clone) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[113], &__pyx_n_s_close) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[114], &__pyx_n_s_colors) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[115], &__pyx_n_s_colorstr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[116], &__pyx_n_s_colorstr_locals_genexpr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[117], &__pyx_n_s_columns) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[118], &__pyx_n_s_concatenate) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[119], &__pyx_n_s_conf) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[120], &__pyx_n_s_conf_thres) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[121], &__pyx_kp_u_config) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[122], &__pyx_n_s_coords) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[123], &__pyx_n_s_copy) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[124], &__pyx_n_s_cos) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[125], &__pyx_n_s_cpu_count) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[126], &__pyx_n_s_cudnn) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[127], &__pyx_n_s_current) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[128], &__pyx_n_s_cv2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[129], &__pyx_n_u_cyan) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[130], &__pyx_kp_u_d) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[131], &__pyx_n_s_d_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[132], &__pyx_n_s_da) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[133], &__pyx_n_s_db) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[134], &__pyx_n_s_decode) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[135], &__pyx_n_s_descending) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[136], &__pyx_n_s_deterministic) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[137], &__pyx_n_s_device) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[138], &__pyx_n_s_dir) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[139], &__pyx_n_s_dirs) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[140], &__pyx_kp_u_disable) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[141], &__pyx_n_s_display) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[142], &__pyx_n_s_divisor) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[143], &__pyx_n_s_e) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[144], &__pyx_n_s_emojis) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[145], &__pyx_kp_u_enable) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[146], &__pyx_n_s_encode) < 0) __PYX_ERR(0, 1, 
__pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[147], &__pyx_n_u_end) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[148], &__pyx_n_s_enter) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[149], &__pyx_n_s_enumerate) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[150], &__pyx_n_s_env) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[151], &__pyx_n_s_env_var) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[152], &__pyx_n_s_environ) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[153], &__pyx_n_s_eps) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[154], &__pyx_n_s_exclude) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[155], &__pyx_n_s_exist_ok) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[156], &__pyx_n_s_exists) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[157], &__pyx_n_s_exit) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[158], &__pyx_n_s_f) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[159], &__pyx_n_s_file) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[160], &__pyx_n_s_file_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[161], &__pyx_n_s_file_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[162], &__pyx_n_s_file_size_locals_genexpr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[163], &__pyx_n_s_fitness) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[164], &__pyx_n_s_float) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[165], &__pyx_n_u_float_kind) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[166], &__pyx_n_s_floor) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[167], &__pyx_n_s_format) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[168], &__pyx_n_s_formatter) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[169], &__pyx_n_s_from_numpy) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[170], &__pyx_n_s_func) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[171], &__pyx_n_s_gain) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[172], &__pyx_kp_u_gc) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[173], &__pyx_n_s_genexpr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[174], &__pyx_n_s_get) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[175], &__pyx_n_s_getLogger) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[176], &__pyx_n_s_get_latest_run) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[177], &__pyx_n_s_get_terminal_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[178], &__pyx_n_s_getctime) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[179], &__pyx_n_s_getenv) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[180], &__pyx_n_s_glob) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - 
if (__Pyx_InitString(__pyx_string_tab[181], &__pyx_n_u_green) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[182], &__pyx_n_s_groups) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[183], &__pyx_n_s_h) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[184], &__pyx_n_s_handler) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[185], &__pyx_n_s_hard) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[186], &__pyx_n_s_home) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[187], &__pyx_n_s_i) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[188], &__pyx_n_u_ignore) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[189], &__pyx_n_s_image_weights) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[190], &__pyx_n_s_img0_shape) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[191], &__pyx_n_s_img1_shape) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[192], &__pyx_n_s_imgsz) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[193], &__pyx_n_s_import) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[194], &__pyx_n_s_increment_path) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[195], &__pyx_n_s_info) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[196], &__pyx_n_s_init_seeds) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[197], &__pyx_n_s_initializing) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[198], &__pyx_n_s_input) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[199], &__pyx_n_s_instance) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[200], &__pyx_n_s_int) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[201], &__pyx_n_s_intersect_dicts) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[202], &__pyx_n_s_intersect_dicts_locals_genexpr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[203], &__pyx_n_s_iou) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[204], &__pyx_n_s_iou_thres) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[205], &__pyx_n_s_is_ascii) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[206], &__pyx_n_s_is_chinese) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[207], &__pyx_n_s_is_coroutine) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[208], &__pyx_n_s_is_dir) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[209], &__pyx_n_s_is_file) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[210], &__pyx_n_s_is_writeable) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[211], &__pyx_kp_u_isenabled) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[212], &__pyx_n_s_items) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[213], &__pyx_n_s_j) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[214], &__pyx_n_s_k) < 0) __PYX_ERR(0, 1, 
__pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[215], &__pyx_n_s_keepdim) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[216], &__pyx_n_s_key) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[217], &__pyx_n_s_kwargs) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[218], &__pyx_n_s_l) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[219], &__pyx_n_s_labels) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[220], &__pyx_n_s_labels_to_class_weights) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[221], &__pyx_n_s_labels_to_image_weights) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[222], &__pyx_n_s_last_list) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[223], &__pyx_kp_u_last_pt) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[224], &__pyx_n_s_level) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[225], &__pyx_n_s_linewidth) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[226], &__pyx_n_s_logging) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[227], &__pyx_n_s_long) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[228], &__pyx_n_u_long) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[229], &__pyx_n_s_lower) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[230], &__pyx_n_s_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[231], &__pyx_n_u_magenta) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[232], &__pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[233], &__pyx_n_s_make_divisible) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[234], &__pyx_n_s_manual_seed) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[235], &__pyx_n_s_matches) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[236], &__pyx_n_s_math) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[237], &__pyx_n_s_max) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[238], &__pyx_n_s_max_columns) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[239], &__pyx_n_s_max_det) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[240], &__pyx_n_s_max_nms) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[241], &__pyx_n_s_max_wh) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[242], &__pyx_n_s_merge) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[243], &__pyx_kp_u_message_s) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[244], &__pyx_n_s_methods) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[245], &__pyx_n_s_min_wh) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[246], &__pyx_n_s_minimum) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[247], &__pyx_n_s_minlength) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[248], &__pyx_n_s_mkdir) < 0) __PYX_ERR(0, 1, 
__pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[249], &__pyx_n_s_mm) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[250], &__pyx_n_s_multi_label) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[251], &__pyx_kp_u_must_be_multiple_of_max_stride) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[252], &__pyx_n_s_n) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[253], &__pyx_n_s_name) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[254], &__pyx_n_s_name_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[255], &__pyx_n_s_nc) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[256], &__pyx_n_s_new_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[257], &__pyx_n_s_nms) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[258], &__pyx_n_s_non_max_suppression) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[259], &__pyx_n_s_nonzero) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[260], &__pyx_n_s_np) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[261], &__pyx_n_s_numpy) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[262], &__pyx_n_s_one_cycle) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[263], &__pyx_n_s_one_cycle_locals_lambda) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[264], &__pyx_n_s_ones) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[265], &__pyx_n_s_open) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[266], &__pyx_n_s_ops) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[267], &__pyx_n_s_opt) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[268], &__pyx_n_s_options) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[269], &__pyx_n_s_os) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[270], &__pyx_n_s_output) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[271], &__pyx_n_s_pad) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[272], &__pyx_n_s_padh) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[273], &__pyx_n_s_padw) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[274], &__pyx_n_s_pandas) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[275], &__pyx_n_s_parents) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[276], &__pyx_n_s_parse) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[277], &__pyx_n_s_path) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[278], &__pyx_n_s_pathlib) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[279], &__pyx_n_s_pattern) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[280], &__pyx_n_s_pd) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[281], &__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[282], &__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2) 
< 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[283], &__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[284], &__pyx_n_s_pi) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[285], &__pyx_n_s_pinned) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[286], &__pyx_n_s_platform) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[287], &__pyx_n_s_precision) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[288], &__pyx_n_s_prediction) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[289], &__pyx_n_s_print) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[290], &__pyx_n_s_print_args) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[291], &__pyx_n_s_print_args_locals_genexpr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[292], &__pyx_n_s_profile) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[293], &__pyx_n_s_python_version) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[294], &__pyx_n_s_random) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[295], &__pyx_n_s_range) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[296], &__pyx_n_s_rank) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[297], &__pyx_n_s_ratio_pad) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[298], &__pyx_n_s_re) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[299], &__pyx_n_s_recursive) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[300], &__pyx_n_u_red) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[301], &__pyx_n_s_redundant) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[302], &__pyx_n_s_repl) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[303], &__pyx_n_s_replace) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[304], &__pyx_n_s_reshape) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[305], &__pyx_n_s_resolve) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[306], &__pyx_n_s_s) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[307], &__pyx_kp_u_s_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[308], &__pyx_kp_u_s_exceeded) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[309], &__pyx_n_s_scale_coords) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[310], &__pyx_n_s_scores) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[311], &__pyx_n_s_search) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[312], &__pyx_n_s_search_dir) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[313], &__pyx_n_s_seed) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[314], &__pyx_n_s_send) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[315], &__pyx_n_s_sep) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[316], 
&__pyx_n_s_setNumThreads) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[317], &__pyx_n_s_set_logging) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[318], &__pyx_n_s_set_printoptions) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[319], &__pyx_n_s_shape) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[320], &__pyx_n_s_shutil) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[321], &__pyx_n_s_spec) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[322], &__pyx_n_s_split) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[323], &__pyx_n_s_st_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[324], &__pyx_n_s_startswith) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[325], &__pyx_n_s_stat) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[326], &__pyx_n_s_stem) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[327], &__pyx_n_s_steps) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[328], &__pyx_n_s_str) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[329], &__pyx_n_s_string) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[330], &__pyx_n_s_sub) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[331], &__pyx_n_s_suffix) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[332], &__pyx_n_s_sum) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[333], &__pyx_n_s_system) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[334], &__pyx_n_s_t) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[335], &__pyx_n_s_tensor) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[336], &__pyx_n_s_test) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[337], &__pyx_n_s_test_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[338], &__pyx_n_s_throw) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[339], &__pyx_n_s_time) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[340], &__pyx_n_s_time_limit) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[341], &__pyx_kp_u_tmp) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[342], &__pyx_kp_u_tmp_txt) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[343], &__pyx_n_s_torch) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[344], &__pyx_n_s_torch_backends_cudnn) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[345], &__pyx_n_s_torchvision) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[346], &__pyx_n_u_true) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[347], &__pyx_n_s_try_except) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[348], &__pyx_n_s_try_except_locals_handler) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[349], &__pyx_n_u_underline) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[350], 
&__pyx_n_s_unlink) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[351], &__pyx_n_s_unquote) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[352], &__pyx_kp_u_updating_to) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[353], &__pyx_n_s_url) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[354], &__pyx_n_s_url2file) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[355], &__pyx_n_s_urllib) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[356], &__pyx_n_s_user_config_dir) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[357], &__pyx_n_s_v) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[358], &__pyx_kp_u_valid_values_are_between_0_0_an) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[359], &__pyx_n_s_vars) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[360], &__pyx_n_s_verbose) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[361], &__pyx_kp_u_version) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[362], &__pyx_n_s_view) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[363], &__pyx_n_s_w) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[364], &__pyx_n_u_w) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[365], &__pyx_n_s_warning) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[366], &__pyx_n_s_weights) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[367], &__pyx_n_u_white) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[368], &__pyx_n_s_with_suffix) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[369], &__pyx_n_s_x) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[370], &__pyx_n_s_xc) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[371], &__pyx_n_s_xi) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[372], &__pyx_n_s_xyn2xy) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[373], &__pyx_n_s_xywh2xyxy) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[374], &__pyx_n_s_xywhn2xyxy) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[375], &__pyx_n_s_xyxy2xywh) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[376], &__pyx_n_s_xyxy2xywhn) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[377], &__pyx_n_s_y) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[378], &__pyx_n_s_y1) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[379], &__pyx_n_s_y2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[380], &__pyx_n_s_yaml) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[381], &__pyx_n_u_yellow) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[382], &__pyx_n_u_yolov5) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[383], &__pyx_n_s_zeros) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - #endif - #if !CYTHON_USE_MODULE_STATE - if (__Pyx_InitStrings(__pyx_string_tab) 
< 0) __PYX_ERR(0, 1, __pyx_L1_error); - #endif - __pyx_float_0_0 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_float_0_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_1E6 = PyFloat_FromDouble(1E6); if (unlikely(!__pyx_float_1E6)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_3E3 = PyFloat_FromDouble(3E3); if (unlikely(!__pyx_float_3E3)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_25 = PyFloat_FromDouble(0.25); if (unlikely(!__pyx_float_0_25)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_45 = PyFloat_FromDouble(0.45); if (unlikely(!__pyx_float_0_45)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_5 = PyInt_FromLong(5); if (unlikely(!__pyx_int_5)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_6 = PyInt_FromLong(6); if (unlikely(!__pyx_int_6)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_10 = PyInt_FromLong(10); if (unlikely(!__pyx_int_10)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_32 = PyInt_FromLong(32); if (unlikely(!__pyx_int_32)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_80 = PyInt_FromLong(80); if (unlikely(!__pyx_int_80)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_100 = PyInt_FromLong(100); if (unlikely(!__pyx_int_100)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_300 = PyInt_FromLong(300); if (unlikely(!__pyx_int_300)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_320 = PyInt_FromLong(320); if (unlikely(!__pyx_int_320)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_640 = PyInt_FromLong(640); if (unlikely(!__pyx_int_640)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: init_globals ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - return 0; -} -/* #### Code section: init_module ### */ - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - 
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except)) __PYX_ERR(0, 49, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except) < 0) __PYX_ERR(0, 49, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except) < 0) __PYX_ERR(0, 49, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct__try_except->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args)) __PYX_ERR(0, 65, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args) < 0) __PYX_ERR(0, 65, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args) < 0) __PYX_ERR(0, 65, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if 
((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_1_print_args->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr)) __PYX_ERR(0, 67, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr) < 0) __PYX_ERR(0, 67, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr) < 0) __PYX_ERR(0, 67, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_2_genexpr->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts)) __PYX_ERR(0, 80, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts) < 0) __PYX_ERR(0, 80, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts) < 0) __PYX_ERR(0, 80, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - 
__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_3_intersect_dicts->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr)) __PYX_ERR(0, 82, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr) < 0) __PYX_ERR(0, 82, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr) < 0) __PYX_ERR(0, 82, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_4_genexpr->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size)) __PYX_ERR(0, 135, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size) < 0) __PYX_ERR(0, 135, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size) < 0) __PYX_ERR(0, 135, __pyx_L1_error) - 
#endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_5_file_size->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr)) __PYX_ERR(0, 141, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr) < 0) __PYX_ERR(0, 141, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr) < 0) __PYX_ERR(0, 141, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_6_genexpr->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle)) __PYX_ERR(0, 186, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle) < 0) __PYX_ERR(0, 186, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle) < 0) __PYX_ERR(0, 186, 
__pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_7_one_cycle->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr)) __PYX_ERR(0, 191, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr) < 0) __PYX_ERR(0, 191, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr) < 0) __PYX_ERR(0, 191, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_8_colorstr->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - #if CYTHON_USE_TYPE_SPECS - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr_spec, NULL); if (unlikely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr)) __PYX_ERR(0, 213, __pyx_L1_error) - if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr_spec, __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr) < 0) __PYX_ERR(0, 213, __pyx_L1_error) - #else - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr = &__pyx_type_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - #endif - #if !CYTHON_USE_TYPE_SPECS - if (__Pyx_PyType_Ready(__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr) < 0) 
__PYX_ERR(0, 213, __pyx_L1_error) - #endif - #if PY_MAJOR_VERSION < 3 - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr->tp_print = 0; - #endif - #if !CYTHON_COMPILING_IN_LIMITED_API - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr->tp_dictoffset && __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr->tp_getattro == PyObject_GenericGetAttr)) { - __pyx_ptype_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general___pyx_scope_struct_9_genexpr->tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - } - #endif - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_general(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_general}, - {0, NULL} -}; -#endif - -#ifdef __cplusplus -namespace { - struct PyModuleDef __pyx_moduledef = - #else - static struct PyModuleDef __pyx_moduledef = - #endif - { - PyModuleDef_HEAD_INIT, - "general", - __pyx_k_General_utils, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #elif CYTHON_USE_MODULE_STATE - sizeof(__pyx_mstate), /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - #if CYTHON_USE_MODULE_STATE - __pyx_m_traverse, /* m_traverse */ - __pyx_m_clear, /* m_clear */ - NULL /* m_free */ - #else - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ - #endif - }; - #ifdef __cplusplus -} /* anonymous namespace */ -#endif -#endif - -#ifndef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initgeneral(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initgeneral(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_general(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_general(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id 
= -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? -1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *module, const char* from_name, const char* to_name, int allow_none) -#else -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) -#endif -{ - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { -#if CYTHON_COMPILING_IN_LIMITED_API - result = PyModule_AddObject(module, to_name, value); -#else - result = PyDict_SetItemString(moddict, to_name, value); -#endif - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - CYTHON_UNUSED_VAR(def); - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; -#if CYTHON_COMPILING_IN_LIMITED_API - moddict = module; -#else - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; -#endif - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_general(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - int stringtab_initialized = 0; - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - long __pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'general' has already been imported. 
Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("general", __pyx_methods, __pyx_k_General_utils, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #elif CYTHON_COMPILING_IN_LIMITED_API - __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - { - int add_module_result = PyState_AddModule(__pyx_t_1, &__pyx_moduledef); - Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - } - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #endif - CYTHON_UNUSED_VAR(__pyx_t_1); - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_general(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - PyEval_InitThreads(); - #endif - /*--- Initialize various global constants etc. 
---*/ - if (__Pyx_InitConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - stringtab_initialized = 1; - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_pdf_toolbox__lib__dia_yolov5__utils__general) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "pdf_toolbox.lib.dia_yolov5.utils.general")) { - if (unlikely((PyDict_SetItemString(modules, "pdf_toolbox.lib.dia_yolov5.utils.general", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - if (unlikely((__Pyx_modinit_type_init_code() < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - (void)__Pyx_modinit_type_import_code(); - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":6 - * """ - * - * import glob # <<<<<<<<<<<<<< - * import logging - * import math - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_glob, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_glob, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":7 - * - * import glob - * import logging # <<<<<<<<<<<<<< - * import math - * import os - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_logging, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_logging, __pyx_t_2) < 0) __PYX_ERR(0, 7, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":8 - * import glob - * import logging - * import math # <<<<<<<<<<<<<< - * import os - * import platform - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_math, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_math, __pyx_t_2) < 0) __PYX_ERR(0, 8, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":9 - * import logging - * import math - * import os # <<<<<<<<<<<<<< - * import platform - * import random - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_os, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_os, __pyx_t_2) < 0) __PYX_ERR(0, 9, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":10 - * import math - * import os - * import 
platform # <<<<<<<<<<<<<< - * import random - * import re - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_platform, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_platform, __pyx_t_2) < 0) __PYX_ERR(0, 10, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":11 - * import os - * import platform - * import random # <<<<<<<<<<<<<< - * import re - * import shutil - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_random, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 11, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_random, __pyx_t_2) < 0) __PYX_ERR(0, 11, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":12 - * import platform - * import random - * import re # <<<<<<<<<<<<<< - * import shutil - * import time - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_re, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_re, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":13 - * import random - * import re - * import shutil # <<<<<<<<<<<<<< - * import time - * import urllib - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_shutil, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_shutil, __pyx_t_2) < 0) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":14 - * import re - * import shutil - * import time # <<<<<<<<<<<<<< - * import urllib - * from pathlib import Path - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_time, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_time, __pyx_t_2) < 0) __PYX_ERR(0, 14, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":15 - * import shutil - * import time - * import urllib # <<<<<<<<<<<<<< - * from pathlib import Path - * - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_urllib, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_urllib, __pyx_t_2) < 0) __PYX_ERR(0, 15, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":16 - * import time - * import urllib - * from pathlib import Path # <<<<<<<<<<<<<< - * - * import cv2 - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_n_s_Path); - __Pyx_GIVEREF(__pyx_n_s_Path); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_Path); - __pyx_t_3 = __Pyx_Import(__pyx_n_s_pathlib, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_Path); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_Path, __pyx_t_2) < 0) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* 
"pdf_toolbox/lib/dia_yolov5/utils/general.py":18 - * from pathlib import Path - * - * import cv2 # <<<<<<<<<<<<<< - * import numpy as np - * import pandas as pd - */ - __pyx_t_3 = __Pyx_ImportDottedModule(__pyx_n_s_cv2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_cv2, __pyx_t_3) < 0) __PYX_ERR(0, 18, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":19 - * - * import cv2 - * import numpy as np # <<<<<<<<<<<<<< - * import pandas as pd - * import torch - */ - __pyx_t_3 = __Pyx_ImportDottedModule(__pyx_n_s_numpy, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_3) < 0) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":20 - * import cv2 - * import numpy as np - * import pandas as pd # <<<<<<<<<<<<<< - * import torch - * import torchvision - */ - __pyx_t_3 = __Pyx_ImportDottedModule(__pyx_n_s_pandas, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_pd, __pyx_t_3) < 0) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":21 - * import numpy as np - * import pandas as pd - * import torch # <<<<<<<<<<<<<< - * import torchvision - * import yaml - */ - __pyx_t_3 = __Pyx_ImportDottedModule(__pyx_n_s_torch, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_torch, __pyx_t_3) < 0) __PYX_ERR(0, 21, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":22 - * import pandas as pd - * import torch - * import torchvision # <<<<<<<<<<<<<< - * import yaml - * - */ - __pyx_t_3 = __Pyx_ImportDottedModule(__pyx_n_s_torchvision, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_torchvision, __pyx_t_3) < 0) __PYX_ERR(0, 22, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":23 - * import torch - * import torchvision - * import yaml # <<<<<<<<<<<<<< - * - * from pdf_toolbox.lib.dia_yolov5.utils.metrics import box_iou, fitness - */ - __pyx_t_3 = __Pyx_ImportDottedModule(__pyx_n_s_yaml, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_yaml, __pyx_t_3) < 0) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":25 - * import yaml - * - * from pdf_toolbox.lib.dia_yolov5.utils.metrics import box_iou, fitness # <<<<<<<<<<<<<< - * - * # Settings - */ - __pyx_t_3 = PyList_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_n_s_box_iou); - __Pyx_GIVEREF(__pyx_n_s_box_iou); - PyList_SET_ITEM(__pyx_t_3, 0, __pyx_n_s_box_iou); - __Pyx_INCREF(__pyx_n_s_fitness); - __Pyx_GIVEREF(__pyx_n_s_fitness); - PyList_SET_ITEM(__pyx_t_3, 1, __pyx_n_s_fitness); - __pyx_t_2 = __Pyx_Import(__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_t_3, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - 
__pyx_t_3 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_box_iou); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_box_iou, __pyx_t_3) < 0) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_fitness); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_fitness, __pyx_t_3) < 0) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":28 - * - * # Settings - * FILE = Path(__file__).resolve() # <<<<<<<<<<<<<< - * ROOT = FILE.parents[1] # YOLOv5 root directory - * NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_Path); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_file); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_resolve); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_FILE, __pyx_t_4) < 0) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":29 - * # Settings - * FILE = Path(__file__).resolve() - * ROOT = FILE.parents[1] # YOLOv5 root directory # <<<<<<<<<<<<<< - * NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads - * VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_FILE); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_parents); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_3, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_ROOT, __pyx_t_4) < 0) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":30 - * FILE = Path(__file__).resolve() - * ROOT = FILE.parents[1] # YOLOv5 root directory - * NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads # <<<<<<<<<<<<<< - * VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_os); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, 
__pyx_n_s_cpu_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyInt_SubtractObjC(__pyx_t_4, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_5 = 1; - __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = PyObject_RichCompare(__pyx_t_3, __pyx_t_2, Py_GT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (__pyx_t_7) { - __Pyx_INCREF(__pyx_t_3); - __pyx_t_4 = __pyx_t_3; - } else { - __pyx_t_6 = __Pyx_PyInt_From_long(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_4 = __pyx_t_6; - __pyx_t_6 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_INCREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_5 = 8; - __pyx_t_6 = __Pyx_PyInt_From_long(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_t_6, Py_LT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_7 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_7 < 0))) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_7) { - __Pyx_INCREF(__pyx_t_3); - __pyx_t_4 = __pyx_t_3; - } else { - __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __pyx_t_2; - __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __pyx_t_4; - __Pyx_INCREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_NUM_THREADS, __pyx_t_3) < 0) __PYX_ERR(0, 30, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":31 - * ROOT = FILE.parents[1] # YOLOv5 root directory - * NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads - * VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode # <<<<<<<<<<<<<< - * - * torch.set_printoptions(linewidth=320, precision=5, profile='long') - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_os); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_getenv); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_tuple__44, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Str(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_lower); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_n_u_true, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_VERBOSE, __pyx_t_3) < 0) __PYX_ERR(0, 31, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":33 - * VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode - * - * torch.set_printoptions(linewidth=320, precision=5, profile='long') # <<<<<<<<<<<<<< - * np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 - * pd.options.display.max_columns = 10 - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_torch); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_set_printoptions); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyDict_NewPresized(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_linewidth, __pyx_int_320) < 0) __PYX_ERR(0, 33, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_precision, __pyx_int_5) < 0) __PYX_ERR(0, 33, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_profile, __pyx_n_u_long) < 0) __PYX_ERR(0, 33, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":34 - * - * torch.set_printoptions(linewidth=320, precision=5, profile='long') - * np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 # <<<<<<<<<<<<<< - * pd.options.display.max_columns = 10 - * cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_set_printoptions); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_linewidth, __pyx_int_320) < 0) __PYX_ERR(0, 34, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_11_5g, __pyx_n_s_format); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (PyDict_SetItem(__pyx_t_4, __pyx_n_u_float_kind, 
__pyx_t_6) < 0) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_formatter, __pyx_t_4) < 0) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":35 - * torch.set_printoptions(linewidth=320, precision=5, profile='long') - * np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 - * pd.options.display.max_columns = 10 # <<<<<<<<<<<<<< - * cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) - * os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pd); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_options); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_display); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__Pyx_PyObject_SetAttrStr(__pyx_t_4, __pyx_n_s_max_columns, __pyx_int_10) < 0) __PYX_ERR(0, 35, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":36 - * np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 - * pd.options.display.max_columns = 10 - * cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) # <<<<<<<<<<<<<< - * os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_cv2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_setNumThreads); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__45, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":37 - * pd.options.display.max_columns = 10 - * cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) - * os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads # <<<<<<<<<<<<<< - * - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_NUM_THREADS); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = __Pyx_PyObject_Str(__pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_os); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_environ); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely((PyObject_SetItem(__pyx_t_3, __pyx_n_u_NUMEXPR_MAX_THREADS, __pyx_t_2) < 0))) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":40 - * - * - * def set_logging(name=None, verbose=VERBOSE): # <<<<<<<<<<<<<< - * # Sets level and returns logger - * rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_1set_logging, 0, __pyx_n_s_set_logging, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__47)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (!__Pyx_CyFunction_InitDefaults(__pyx_t_2, sizeof(__pyx_defaults), 1)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_VERBOSE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_Defaults(__pyx_defaults, __pyx_t_2)->__pyx_arg_verbose = __pyx_t_3; - __Pyx_GIVEREF(__pyx_t_3); - __pyx_t_3 = 0; - __Pyx_CyFunction_SetDefaultsGetter(__pyx_t_2, __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_64__defaults__); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_set_logging, __pyx_t_2) < 0) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":47 - * - * - * LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) # <<<<<<<<<<<<<< - * - * def try_except(func): - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_set_logging); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__48, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_LOGGER, __pyx_t_3) < 0) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":49 - * LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) - * - * def try_except(func): # <<<<<<<<<<<<<< - * # try-except function. 
Usage: @try_except decorator - * def handler(*args, **kwargs): - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_3try_except, 0, __pyx_n_s_try_except, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__50)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_try_except, __pyx_t_3) < 0) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":60 - * - * - * def methods(instance): # <<<<<<<<<<<<<< - * # Get class/instance methods - * return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_5methods, 0, __pyx_n_s_methods, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__52)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_methods, __pyx_t_3) < 0) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":65 - * - * - * def print_args(name, opt): # <<<<<<<<<<<<<< - * # Print argparser arguments - * LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_7print_args, 0, __pyx_n_s_print_args, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__54)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_print_args, __pyx_t_3) < 0) __PYX_ERR(0, 65, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":70 - * - * - * def init_seeds(seed=0): # <<<<<<<<<<<<<< - * # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - * # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_9init_seeds, 0, __pyx_n_s_init_seeds, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__56)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__57); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_init_seeds, __pyx_t_3) < 0) __PYX_ERR(0, 70, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":80 - * - * - * def intersect_dicts(da, db, exclude=()): # <<<<<<<<<<<<<< - * # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - * return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_11intersect_dicts, 0, __pyx_n_s_intersect_dicts, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__59)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__60); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_intersect_dicts, 
__pyx_t_3) < 0) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":85 - * - * - * def get_latest_run(search_dir='.'): # <<<<<<<<<<<<<< - * # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) - * last_list = glob.glob(f'{search_dir}/[inserted by cython to avoid comment start]**[inserted by cython to avoid comment closer]/last*.pt', recursive=True) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_13get_latest_run, 0, __pyx_n_s_get_latest_run, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__62)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__63); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_get_latest_run, __pyx_t_3) < 0) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":91 - * - * - * def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): # <<<<<<<<<<<<<< - * # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. - * env = os.getenv(env_var) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_15user_config_dir, 0, __pyx_n_s_user_config_dir, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__65)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__66); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_user_config_dir, __pyx_t_3) < 0) __PYX_ERR(0, 91, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":104 - * - * - * def is_writeable(dir, test=False): # <<<<<<<<<<<<<< - * # Return True if directory has write permissions, test opening a file with write permissions if test=True - * if test: # method 1 - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_17is_writeable, 0, __pyx_n_s_is_writeable, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__68)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__69); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_is_writeable, __pyx_t_3) < 0) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":119 - * - * - * def is_ascii(s=''): # <<<<<<<<<<<<<< - * # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - * s = str(s) # convert list, tuple, None, etc. 
to str - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_19is_ascii, 0, __pyx_n_s_is_ascii, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__71)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__72); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_is_ascii, __pyx_t_3) < 0) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":125 - * - * - * def is_chinese(s=''): # <<<<<<<<<<<<<< - * # Is string composed of any Chinese characters? - * return re.search('[\u4e00-\u9fff]', s) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_21is_chinese, 0, __pyx_n_s_is_chinese, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__73)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 125, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__74); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_is_chinese, __pyx_t_3) < 0) __PYX_ERR(0, 125, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":130 - * - * - * def emojis(str=''): # <<<<<<<<<<<<<< - * # Return platform-dependent emoji-safe version of string - * return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_23emojis, 0, __pyx_n_s_emojis, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__76)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__77); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_emojis, __pyx_t_3) < 0) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":135 - * - * - * def file_size(path): # <<<<<<<<<<<<<< - * # Return file/dir size (MB) - * path = Path(path) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_25file_size, 0, __pyx_n_s_file_size, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__79)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_file_size, __pyx_t_3) < 0) __PYX_ERR(0, 135, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":146 - * - * - * def check_python(minimum='3.6.2'): # <<<<<<<<<<<<<< - * # Check current python version vs. 
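Note: is_chinese and emojis are quoted in full in the comments above; standalone versions follow (the parameter is renamed from `str` to `s` to avoid shadowing the builtin):

import platform
import re

def is_chinese(s=''):
    # Is string composed of any Chinese characters? Returns a Match (truthy) or None
    return re.search('[\u4e00-\u9fff]', s)

def emojis(s=''):
    # Return platform-dependent emoji-safe version of string
    return s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s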
required python version - * check_version(platform.python_version(), minimum, name='Python ', hard=True) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_27check_python, 0, __pyx_n_s_check_python, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__81)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__82); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_check_python, __pyx_t_3) < 0) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":151 - * - * - * def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # <<<<<<<<<<<<<< - * # Check version vs. required version - * return True - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_29check_version, 0, __pyx_n_s_check_version, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__84)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__85); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_check_version, __pyx_t_3) < 0) __PYX_ERR(0, 151, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":156 - * - * - * def check_img_size(imgsz, s=32, floor=0): # <<<<<<<<<<<<<< - * # Verify image size is a multiple of stride s in each dimension - * if isinstance(imgsz, int): # integer i.e. img_size=640 - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_31check_img_size, 0, __pyx_n_s_check_img_size, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__87)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__88); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_check_img_size, __pyx_t_3) < 0) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":167 - * - * - * def url2file(url): # <<<<<<<<<<<<<< - * # Convert URL to filename, i.e. 
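Note: a sketch of the integer branch of check_img_size, with make_divisible rounding up to the stride; print stands in for the module's LOGGER, and the exact rounding is an assumption based on the docstrings quoted above:

import math

def make_divisible(x, divisor):
    # Nearest multiple of divisor at or above x
    return math.ceil(x / divisor) * divisor

def check_img_size(imgsz, s=32, floor=0):
    # Verify image size is a multiple of stride s (integer case, i.e. img_size=640)
    new_size = max(make_divisible(imgsz, int(s)), floor)
    if new_size != imgsz:
        print(f'WARNING: img_size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
    return new_size

print(check_img_size(641))  # 672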
https://url.com/file.txt?auth -> file.txt - * url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_33url2file, 0, __pyx_n_s_url2file, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__90)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 167, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_url2file, __pyx_t_3) < 0) __PYX_ERR(0, 167, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":174 - * - * - * def make_divisible(x, divisor): # <<<<<<<<<<<<<< - * # Returns nearest x divisible by divisor - * if isinstance(divisor, torch.Tensor): - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_35make_divisible, 0, __pyx_n_s_make_divisible, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__92)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 174, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_make_divisible, __pyx_t_3) < 0) __PYX_ERR(0, 174, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":181 - * - * - * def clean_str(s): # <<<<<<<<<<<<<< - * # Cleans a string by replacing special characters with underscore _ - * return re.sub(pattern="[|@#!$%&()=?^*;:,><+]", repl="_", string=s) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_37clean_str, 0, __pyx_n_s_clean_str, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__93)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_clean_str, __pyx_t_3) < 0) __PYX_ERR(0, 181, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":186 - * - * - * def one_cycle(y1=0.0, y2=1.0, steps=100): # <<<<<<<<<<<<<< - * # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - * return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_39one_cycle, 0, __pyx_n_s_one_cycle, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__95)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 186, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__96); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_one_cycle, __pyx_t_3) < 0) __PYX_ERR(0, 186, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":191 - * - * - * def colorstr(*input): # <<<<<<<<<<<<<< - * # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
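Note: one_cycle's lambda is quoted verbatim above; it evaluates a half-cosine ramp from y1 to y2 over `steps`:

import math

def one_cycle(y1=0.0, y2=1.0, steps=100):
    # Sinusoidal ramp from y1 to y2 (https://arxiv.org/pdf/1812.01187.pdf)
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

lr = one_cycle(0.1, 0.01, steps=300)
print(lr(0), lr(150), lr(300))  # 0.1 0.055 0.01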
colorstr('blue', 'hello world') - * *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_41colorstr, 0, __pyx_n_s_colorstr, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__98)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 191, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_colorstr, __pyx_t_3) < 0) __PYX_ERR(0, 191, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":216 - * - * - * def labels_to_class_weights(labels, nc=80): # <<<<<<<<<<<<<< - * # Get class weights (inverse frequency) from training labels - * if labels[0] is None: # no labels loaded - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_43labels_to_class_weights, 0, __pyx_n_s_labels_to_class_weights, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__100)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 216, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__101); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_labels_to_class_weights, __pyx_t_3) < 0) __PYX_ERR(0, 216, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":235 - * - * - * def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): # <<<<<<<<<<<<<< - * # Produces image weights based on class_weights and image contents - * class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_45labels_to_image_weights, 0, __pyx_n_s_labels_to_image_weights, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__103)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (!__Pyx_CyFunction_InitDefaults(__pyx_t_3, sizeof(__pyx_defaults1), 1)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_ones); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_tuple__104, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_CyFunction_Defaults(__pyx_defaults1, __pyx_t_3)->__pyx_arg_class_weights = __pyx_t_2; - __Pyx_GIVEREF(__pyx_t_2); - __pyx_t_2 = 0; - __Pyx_CyFunction_SetDefaultsGetter(__pyx_t_3, __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_66__defaults__); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_labels_to_image_weights, __pyx_t_3) < 0) __PYX_ERR(0, 235, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":243 - * - * - * def xyxy2xywh(x): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_47xyxy2xywh, 0, __pyx_n_s_xyxy2xywh, NULL, 
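Note: the extra __Pyx_CyFunction_InitDefaults/Defaults plumbing above exists because labels_to_image_weights has a non-constant default, class_weights=np.ones(80). Cython evaluates it once at module init, mirroring CPython's def-time default evaluation:

import numpy as np

def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    ...

# np.ones(80) ran exactly once, when `def` executed; the array is stored with the function,
# which is what the defaults plumbing above reproduces in C.
print(labels_to_image_weights.__defaults__[1].shape)  # (80,)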
__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__106)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 243, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_xyxy2xywh, __pyx_t_3) < 0) __PYX_ERR(0, 243, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":253 - * - * - * def xywh2xyxy(x): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_49xywh2xyxy, 0, __pyx_n_s_xywh2xyxy, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__107)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_xywh2xyxy, __pyx_t_3) < 0) __PYX_ERR(0, 253, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":263 - * - * - * def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_51xywhn2xyxy, 0, __pyx_n_s_xywhn2xyxy, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__109)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__110); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_xywhn2xyxy, __pyx_t_3) < 0) __PYX_ERR(0, 263, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":273 - * - * - * def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # <<<<<<<<<<<<<< - * # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - * if clip: - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_53xyxy2xywhn, 0, __pyx_n_s_xyxy2xywhn, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__112)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 273, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__113); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_xyxy2xywhn, __pyx_t_3) < 0) __PYX_ERR(0, 273, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":285 - * - * - * def xyn2xy(x, w=640, h=640, padw=0, padh=0): # <<<<<<<<<<<<<< - * # Convert normalized segments into pixel segments, shape (n,2) - * y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_55xyn2xy, 0, __pyx_n_s_xyn2xy, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__114)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 285, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__115); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_xyn2xy, __pyx_t_3) < 0) __PYX_ERR(0, 285, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":293 - * - * - * def 
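Note: a NumPy-only sketch of the box conversion described in the quoted comment (the real function also accepts a torch.Tensor via x.clone()):

import numpy as np

def xyxy2xywh(x):
    # [x1, y1, x2, y2] -> [x_center, y_center, width, height]
    y = np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]        # width
    y[:, 3] = x[:, 3] - x[:, 1]        # height
    return y

print(xyxy2xywh(np.array([[0., 0., 10., 20.]])))  # [[ 5. 10. 10. 20.]]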
scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # <<<<<<<<<<<<<< - * # Rescale coords (xyxy) from img1_shape to img0_shape - * if ratio_pad is None: # calculate from img0_shape - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_57scale_coords, 0, __pyx_n_s_scale_coords, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__117)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 293, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__118); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_scale_coords, __pyx_t_3) < 0) __PYX_ERR(0, 293, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":309 - * - * - * def clip_coords(boxes, shape): # <<<<<<<<<<<<<< - * # Clip bounding xyxy bounding boxes to image shape (height, width) - * if isinstance(boxes, torch.Tensor): # faster individually - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_59clip_coords, 0, __pyx_n_s_clip_coords, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__120)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 309, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_clip_coords, __pyx_t_3) < 0) __PYX_ERR(0, 309, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":321 - * - * - * def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, # <<<<<<<<<<<<<< - * labels=(), max_det=300): - * """Runs Non-Maximum Suppression (NMS) on inference results - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_61non_max_suppression, 0, __pyx_n_s_non_max_suppression, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__122)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 321, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__123); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_non_max_suppression, __pyx_t_3) < 0) __PYX_ERR(0, 321, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":415 - * - * - * def increment_path(path, exist_ok=False, sep='', mkdir=False): # <<<<<<<<<<<<<< - * # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
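Note: a sketch of increment_path's documented behaviour; the linear probe below is an assumption (the shipped implementation finds the next free suffix differently), but the observable contract matches the comment:

from pathlib import Path

def increment_path(path, exist_ok=False, sep='', mkdir=False):
    # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ...
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        n = 2
        while Path(f'{path}{sep}{n}').exists():
            n += 1
        path = Path(f'{path}{sep}{n}')
    if mkdir:
        path.mkdir(parents=True, exist_ok=True)
    return path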
- * path = Path(path) # os-agnostic - */ - __pyx_t_3 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_7general_63increment_path, 0, __pyx_n_s_increment_path, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_d, ((PyObject *)__pyx_codeobj__125)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 415, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_3, __pyx_tuple__126); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_increment_path, __pyx_t_3) < 0) __PYX_ERR(0, 415, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":431 - * - * # Variables - * NCOLS = shutil.get_terminal_size().columns # terminal window size for tqdm # <<<<<<<<<<<<<< - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_shutil); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 431, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_get_terminal_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 431, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 431, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_columns); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 431, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PyDict_SetItem(__pyx_d, __pyx_n_s_NCOLS, __pyx_t_2) < 0) __PYX_ERR(0, 431, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/general.py":1 - * # YOLOv5 by Ultralytics, GPL-3.0 license # <<<<<<<<<<<<<< - * """ - * General utils - */ - __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test_2, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_6); - if (__pyx_m) { - if (__pyx_d && stringtab_initialized) { - __Pyx_AddTraceback("init pdf_toolbox.lib.dia_yolov5.utils.general", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - #if !CYTHON_USE_MODULE_STATE - Py_CLEAR(__pyx_m); - #endif - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init pdf_toolbox.lib.dia_yolov5.utils.general"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 
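Note: the C steps above (module-global lookup of shutil, attribute fetch of get_terminal_size, no-arg call, attribute fetch of columns, then PyDict_SetItem) are the unrolled form of a single module-level assignment:

import shutil

NCOLS = shutil.get_terminal_size().columns  # terminal window size for tqdm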
0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} -/* #### Code section: cleanup_globals ### */ -/* #### Code section: cleanup_module ### */ -/* #### Code section: main_method ### */ -/* #### Code section: utility_code_pragmas ### */ -#if _MSC_VER -#pragma warning( push ) -/* Warning 4127: conditional expression is constant - * Cython uses constant conditional expressions to allow in inline functions to be optimized at - * compile-time, so this warning is not useful - */ -#pragma warning( disable : 4127 ) -#endif - - - -/* #### Code section: utility_code_def ### */ - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyErrExceptionMatches */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; icurexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; - if (unlikely(PyTuple_Check(err))) - return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* PyObjectGetAttrStrNoError */ -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - 
__Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStrNoError(__pyx_b, name); - if (unlikely(!result) && !PyErr_Occurred()) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* TupleAndListFromArray */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { - PyObject *v; - Py_ssize_t i; - for (i = 0; i < length; i++) { - v = dest[i] = src[i]; - Py_INCREF(v); - } -} -static CYTHON_INLINE PyObject * -__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) -{ - PyObject *res; - if (n <= 0) { - Py_INCREF(__pyx_empty_tuple); - return __pyx_empty_tuple; - } - res = PyTuple_New(n); - if (unlikely(res == NULL)) return NULL; - __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n); - return res; -} -static CYTHON_INLINE PyObject * -__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n) -{ - PyObject *res; - if (n <= 0) { - return PyList_New(0); - } - res = PyList_New(n); - if (unlikely(res == NULL)) return NULL; - __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n); - return res; -} -#endif - -/* BytesEquals */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - const char *ps1, *ps2; - Py_ssize_t length = PyBytes_GET_SIZE(s1); - if (length != PyBytes_GET_SIZE(s2)) - return (equals == Py_NE); - ps1 = PyBytes_AS_STRING(s1); - ps2 = PyBytes_AS_STRING(s2); - if (ps1[0] != ps2[0]) { - return (equals == Py_NE); - } else if (length == 1) { - return (equals == Py_EQ); - } else { - int result; -#if CYTHON_USE_UNICODE_INTERNALS - Py_hash_t hash1, hash2; - hash1 = ((PyBytesObject*)s1)->ob_shash; - hash2 = ((PyBytesObject*)s2)->ob_shash; - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - return (equals == Py_NE); - } -#endif - result = memcmp(ps1, ps2, (size_t)length); - return (equals == Py_EQ) ? 
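Note: __Pyx_GetBuiltinName is the final step of Cython's global lookup: module dict first (see GetModuleGlobalName further down), then builtins, then the NameError whose format string appears above. In Python terms:

import builtins

def load_global(name, module_globals):
    # module dict first, then builtins, else NameError — same order as the C helpers
    if name in module_globals:
        return module_globals[name]
    try:
        return getattr(builtins, name)
    except AttributeError:
        raise NameError(f"name '{name}' is not defined") from None

print(load_global('len', globals()))  # <built-in function len>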
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -/* UnicodeEquals */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - return PyObject_RichCompareBool(s1, s2, equals); -#else -#if PY_MAJOR_VERSION < 3 - PyObject* owned_ref = NULL; -#endif - int s1_is_unicode, s2_is_unicode; - if (s1 == s2) { - goto return_eq; - } - s1_is_unicode = PyUnicode_CheckExact(s1); - s2_is_unicode = PyUnicode_CheckExact(s2); -#if PY_MAJOR_VERSION < 3 - if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { - owned_ref = PyUnicode_FromObject(s2); - if (unlikely(!owned_ref)) - return -1; - s2 = owned_ref; - s2_is_unicode = 1; - } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { - owned_ref = PyUnicode_FromObject(s1); - if (unlikely(!owned_ref)) - return -1; - s1 = owned_ref; - s1_is_unicode = 1; - } else if (((!s2_is_unicode) & (!s1_is_unicode))) { - return __Pyx_PyBytes_Equals(s1, s2, equals); - } -#endif - if (s1_is_unicode & s2_is_unicode) { - Py_ssize_t length; - int kind; - void *data1, *data2; - if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) - return -1; - length = __Pyx_PyUnicode_GET_LENGTH(s1); - if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { - goto return_ne; - } -#if CYTHON_USE_UNICODE_INTERNALS - { - Py_hash_t hash1, hash2; - #if CYTHON_PEP393_ENABLED - hash1 = ((PyASCIIObject*)s1)->hash; - hash2 = ((PyASCIIObject*)s2)->hash; - #else - hash1 = ((PyUnicodeObject*)s1)->hash; - hash2 = ((PyUnicodeObject*)s2)->hash; - #endif - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - goto return_ne; - } - } -#endif - kind = __Pyx_PyUnicode_KIND(s1); - if (kind != __Pyx_PyUnicode_KIND(s2)) { - goto return_ne; - } - data1 = __Pyx_PyUnicode_DATA(s1); - data2 = __Pyx_PyUnicode_DATA(s2); - if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { - goto return_ne; - } else if (length == 1) { - goto return_eq; - } else { - int result = memcmp(data1, data2, (size_t)(length * kind)); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ) ? 
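Note: __Pyx_PyBytes_Equals and __Pyx_PyUnicode_Equals run the same short-circuit cascade: pointer identity, length, cached hash, first character, then memcmp. The observable ordering, sketched in Python (the cached-hash shortcut is internal to CPython and omitted):

def fast_equals(s1: bytes, s2: bytes) -> bool:
    if s1 is s2:
        return True        # identity
    if len(s1) != len(s2):
        return False       # length mismatch
    if s1[:1] != s2[:1]:
        return False       # cheap first-byte probe
    return s1 == s2        # full memcmp-style comparison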
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & s2_is_unicode) { - goto return_ne; - } else if ((s2 == Py_None) & s1_is_unicode) { - goto return_ne; - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -return_eq: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ); -return_ne: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_NE); -#endif -} - -/* fastcall */ -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) -{ - Py_ssize_t i, n = PyTuple_GET_SIZE(kwnames); - for (i = 0; i < n; i++) - { - if (s == PyTuple_GET_ITEM(kwnames, i)) return kwvalues[i]; - } - for (i = 0; i < n; i++) - { - int eq = __Pyx_PyUnicode_Equals(s, PyTuple_GET_ITEM(kwnames, i), Py_EQ); - if (unlikely(eq != 0)) { - if (unlikely(eq < 0)) return NULL; // error - return kwvalues[i]; - } - } - return NULL; // not found (no exception set) -} -#endif - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject *const *kwvalues, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - int kwds_is_tuple = CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds)); - while (1) { - if (kwds_is_tuple) { - if (pos >= PyTuple_GET_SIZE(kwds)) break; - key = PyTuple_GET_ITEM(kwds, pos); - value = kwvalues[pos]; - pos++; - } - else - { - if (!PyDict_Next(kwds, &pos, &key, &value)) break; - } - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = ( - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**name, key) - ); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 
0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - #if PY_MAJOR_VERSION < 3 - PyErr_Format(PyExc_TypeError, - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - PyErr_Format(PyExc_TypeError, - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -/* PyDictVersioning */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? 
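Note: the error branches of __Pyx_ParseOptionalKeywords and __Pyx_RaiseArgtupleInvalid reproduce CPython's calling-convention diagnostics (the wording differs slightly between the two runtimes):

def f(a, b=0):
    return a + b

for call in (lambda: f(1, a=2),   # duplicate -> __Pyx_RaiseDoubleKeywordsError
             lambda: f(1, c=2),   # unexpected -> the invalid_keyword branch
             lambda: f()):        # too few -> __Pyx_RaiseArgtupleInvalid
    try:
        call()
    except TypeError as e:
        print(e)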
__PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#elif CYTHON_COMPILING_IN_LIMITED_API - if (unlikely(!__pyx_m)) { - return NULL; - } - result = PyObject_GetAttr(__pyx_m, name); - if (likely(result)) { - return result; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* PyObjectCall */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = Py_TYPE(func)->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyIntCompare */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_UNUSED_VAR(inplace); - if (op1 == op2) { - Py_RETURN_TRUE; - } - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long a = PyInt_AS_LONG(op1); - if (a == b) Py_RETURN_TRUE; else Py_RETURN_FALSE; - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - int unequal; - unsigned long uintval; - Py_ssize_t size = Py_SIZE(op1); - const digit* digits = ((PyLongObject*)op1)->ob_digit; - if (intval == 0) { - if (size == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; - } else if (intval < 0) { - if (size >= 0) - Py_RETURN_FALSE; - intval = -intval; - size = -size; - } else { - if (size <= 0) - Py_RETURN_FALSE; - } - uintval = (unsigned long) intval; -#if PyLong_SHIFT * 4 < SIZEOF_LONG*8 - if (uintval >> (PyLong_SHIFT * 4)) { - unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) - | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * 
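Note: the digit-by-digit branches above compare a C long against a PyLong's ob_digit array without allocating. The layout they walk is observable from Python:

import sys

SHIFT = sys.int_info.bits_per_digit   # PyLong_SHIFT (30 on typical 64-bit CPython)
MASK = (1 << SHIFT) - 1               # PyLong_MASK

def digits(n):
    # Decompose a non-negative int the way ob_digit stores it
    out = []
    while n:
        out.append(n & MASK)
        n >>= SHIFT
    return out or [0]

print(digits(2**31))  # [0, 2] -> two digits, handled by the size == 2 branch above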
PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); - } else -#endif -#if PyLong_SHIFT * 3 < SIZEOF_LONG*8 - if (uintval >> (PyLong_SHIFT * 3)) { - unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) - | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); - } else -#endif -#if PyLong_SHIFT * 2 < SIZEOF_LONG*8 - if (uintval >> (PyLong_SHIFT * 2)) { - unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) - | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); - } else -#endif -#if PyLong_SHIFT * 1 < SIZEOF_LONG*8 - if (uintval >> (PyLong_SHIFT * 1)) { - unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) - | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); - } else -#endif - unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); - if (unequal == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double a = __pyx_PyFloat_AsDouble(op1); -#else - double a = PyFloat_AS_DOUBLE(op1); -#endif - if ((double)a == (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE; - } - return ( - PyObject_RichCompare(op1, op2, Py_EQ)); -} - -/* PyFunctionFastCall */ -#if CYTHON_FAST_PYCALL && !CYTHON_VECTORCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif - -/* PyObjectCallMethO */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectFastCall */ -static PyObject* __Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs) { - PyObject *argstuple; - PyObject *result; - size_t i; - argstuple = PyTuple_New((Py_ssize_t)nargs); - if (unlikely(!argstuple)) return NULL; - for (i = 0; i < nargs; i++) { - Py_INCREF(args[i]); - PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]); - } - result = __Pyx_PyObject_Call(func, argstuple, kwargs); - Py_DECREF(argstuple); - return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t _nargs, PyObject *kwargs) { - Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs); -#if CYTHON_COMPILING_IN_CPYTHON - if (nargs == 0 && kwargs == NULL) { -#ifdef __Pyx_CyFunction_USED - if (__Pyx_IsCyOrPyCFunction(func)) -#else - if (PyCFunction_Check(func)) -#endif - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { - return __Pyx_PyObject_CallMethO(func, NULL); - } - } - } - else if (nargs == 1 && kwargs == NULL) { - if (PyCFunction_Check(func)) - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, args[0]); - } - 
} - } -#endif - #if PY_VERSION_HEX < 0x030800B1 - #if CYTHON_FAST_PYCCALL - if (PyCFunction_Check(func)) { - if (kwargs) { - return _PyCFunction_FastCallDict(func, args, nargs, kwargs); - } else { - return _PyCFunction_FastCallKeywords(func, args, nargs, NULL); - } - } - #if PY_VERSION_HEX >= 0x030700A1 - if (!kwargs && __Pyx_IS_TYPE(func, &PyMethodDescr_Type)) { - return _PyMethodDescr_FastCallKeywords(func, args, nargs, NULL); - } - #endif - #endif - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs); - } - #endif - #endif - #if CYTHON_VECTORCALL - vectorcallfunc f = _PyVectorcall_Function(func); - if (f) { - return f(func, args, (size_t)nargs, kwargs); - } - #elif defined(__Pyx_CyFunction_USED) && CYTHON_BACKPORT_VECTORCALL - if (__Pyx_CyFunction_CheckExact(func)) { - __pyx_vectorcallfunc f = __Pyx_CyFunction_func_vectorcall(func); - if (f) return f(func, args, (size_t)nargs, kwargs); - } - #endif - if (nargs == 0) { - return __Pyx_PyObject_Call(func, __pyx_empty_tuple, kwargs); - } - return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs); -} - -/* KeywordStringCheck */ -static int __Pyx_CheckKeywordStrings( - PyObject *kw, - const char* function_name, - int kw_allowed) -{ - PyObject* key = 0; - Py_ssize_t pos = 0; -#if CYTHON_COMPILING_IN_PYPY - if (!kw_allowed && PyDict_Next(kw, &pos, &key, 0)) - goto invalid_keyword; - return 1; -#else - if (CYTHON_METH_FASTCALL && likely(PyTuple_Check(kw))) { - if (unlikely(PyTuple_GET_SIZE(kw) == 0)) - return 1; - if (!kw_allowed) { - key = PyTuple_GET_ITEM(kw, 0); - goto invalid_keyword; - } -#if PY_VERSION_HEX < 0x03090000 - for (pos = 0; pos < PyTuple_GET_SIZE(kw); pos++) { - key = PyTuple_GET_ITEM(kw, pos); - if (unlikely(!PyUnicode_Check(key))) - goto invalid_keyword_type; - } -#endif - return 1; - } - while (PyDict_Next(kw, &pos, &key, 0)) { - #if PY_MAJOR_VERSION < 3 - if (unlikely(!PyString_Check(key))) - #endif - if (unlikely(!PyUnicode_Check(key))) - goto invalid_keyword_type; - } - if (!kw_allowed && unlikely(key)) - goto invalid_keyword; - return 1; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - return 0; -#endif -invalid_keyword: - #if PY_MAJOR_VERSION < 3 - PyErr_Format(PyExc_TypeError, - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - PyErr_Format(PyExc_TypeError, - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif - return 0; -} - -/* RaiseClosureNameError */ -static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) { - PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname); -} - -/* GetTopmostException */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = 
exc_info->exc_traceback; - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - #endif - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -#endif - -/* GetException */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type, *local_value, *local_tb; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - Py_XDECREF(local_tb); - return -1; -} - -/* PyObjectCallOneArg */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *args[2] = {NULL, arg}; - return __Pyx_PyObject_FastCall(func, args+1, 1 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* SwapException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type 
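Note: the Save/Reset/Swap helpers manage the per-thread "currently handled exception" stack, i.e. the state Python code observes through sys.exc_info():

import sys

try:
    raise ValueError('demo')
except ValueError:
    exc_type, exc_value, tb = sys.exc_info()   # what __Pyx__ExceptionSave captures
    print(exc_type.__name__, exc_value)        # ValueError demo
print(sys.exc_info())                          # (None, None, None) once the handler exits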
= *type; - exc_info->exc_value = *value; - exc_info->exc_traceback = *tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = *type; - tstate->exc_value = *value; - tstate->exc_traceback = *tb; - #endif - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); - PyErr_SetExcInfo(*type, *value, *tb); - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#endif - -/* FixUpExtensionType */ -#if CYTHON_USE_TYPE_SPECS -static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) { -#if PY_VERSION_HEX > 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API - (void) spec; - (void) type; -#else - const PyType_Slot *slot = spec->slots; - while (slot && slot->slot && slot->slot != Py_tp_members) - slot++; - if (slot && slot->slot == Py_tp_members) { - int changed = 0; -#if !(PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON) - const -#endif - PyMemberDef *memb = (PyMemberDef*) slot->pfunc; - while (memb && memb->name) { - if (memb->name[0] == '_' && memb->name[1] == '_') { -#if PY_VERSION_HEX < 0x030900b1 - if (strcmp(memb->name, "__weaklistoffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); - type->tp_weaklistoffset = memb->offset; - changed = 1; - } - else if (strcmp(memb->name, "__dictoffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); - type->tp_dictoffset = memb->offset; - changed = 1; - } -#if CYTHON_METH_FASTCALL - else if (strcmp(memb->name, "__vectorcalloffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); -#if PY_VERSION_HEX >= 0x030800b4 - type->tp_vectorcall_offset = memb->offset; -#else - type->tp_print = (printfunc) memb->offset; -#endif - changed = 1; - } -#endif -#else - if ((0)); -#endif -#if PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON - else if (strcmp(memb->name, "__module__") == 0) { - PyObject *descr; - assert(memb->type == T_OBJECT); - assert(memb->flags == 0 || memb->flags == READONLY); - descr = PyDescr_NewMember(type, memb); - if (unlikely(!descr)) - return -1; - if (unlikely(PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr) < 0)) { - Py_DECREF(descr); - return -1; - } - Py_DECREF(descr); - changed = 1; - } -#endif - } - memb++; - } - if (changed) - PyType_Modified(type); - } -#endif - return 0; -} -#endif - -/* FetchCommonType */ -static PyObject *__Pyx_FetchSharedCythonABIModule(void) { - PyObject *abi_module = PyImport_AddModule((char*) __PYX_ABI_MODULE_NAME); - if (!abi_module) return NULL; - Py_INCREF(abi_module); - return abi_module; -} -static int __Pyx_VerifyCachedType(PyObject *cached_type, - const char *name, - Py_ssize_t basicsize, - Py_ssize_t expected_basicsize) { - if (!PyType_Check(cached_type)) { - PyErr_Format(PyExc_TypeError, - "Shared Cython type %.200s is not a type object", name); - return -1; - } - if (basicsize != expected_basicsize) { - PyErr_Format(PyExc_TypeError, - "Shared Cython type %.200s has the wrong size, try recompiling", - name); - return -1; - } - return 0; -} -#if !CYTHON_USE_TYPE_SPECS -static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { - PyObject* abi_module; - const char* object_name; - PyTypeObject *cached_type = NULL; - abi_module = __Pyx_FetchSharedCythonABIModule(); - if 
(!abi_module) return NULL; - object_name = strrchr(type->tp_name, '.'); - object_name = object_name ? object_name+1 : type->tp_name; - cached_type = (PyTypeObject*) PyObject_GetAttrString(abi_module, object_name); - if (cached_type) { - if (__Pyx_VerifyCachedType( - (PyObject *)cached_type, - object_name, - cached_type->tp_basicsize, - type->tp_basicsize) < 0) { - goto bad; - } - goto done; - } - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - if (PyType_Ready(type) < 0) goto bad; - if (PyObject_SetAttrString(abi_module, object_name, (PyObject *)type) < 0) - goto bad; - Py_INCREF(type); - cached_type = type; -done: - Py_DECREF(abi_module); - return cached_type; -bad: - Py_XDECREF(cached_type); - cached_type = NULL; - goto done; -} -#else -static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases) { - PyObject *abi_module, *cached_type = NULL; - const char* object_name = strrchr(spec->name, '.'); - object_name = object_name ? object_name+1 : spec->name; - abi_module = __Pyx_FetchSharedCythonABIModule(); - if (!abi_module) return NULL; - cached_type = PyObject_GetAttrString(abi_module, object_name); - if (cached_type) { - Py_ssize_t basicsize; -#if CYTHON_COMPILING_IN_LIMITED_API - PyObject *py_basicsize; - py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); - if (unlikely(!py_basicsize)) goto bad; - basicsize = PyLong_AsSsize_t(py_basicsize); - Py_DECREF(py_basicsize); - py_basicsize = 0; - if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; -#else - basicsize = likely(PyType_Check(cached_type)) ? ((PyTypeObject*) cached_type)->tp_basicsize : -1; -#endif - if (__Pyx_VerifyCachedType( - cached_type, - object_name, - basicsize, - spec->basicsize) < 0) { - goto bad; - } - goto done; - } - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - (void) module; - cached_type = __Pyx_PyType_FromModuleAndSpec(abi_module, spec, bases); - if (unlikely(!cached_type)) goto bad; - if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad; - if (PyObject_SetAttrString(abi_module, object_name, cached_type) < 0) goto bad; -done: - Py_DECREF(abi_module); - assert(cached_type == NULL || PyType_Check(cached_type)); - return (PyTypeObject *) cached_type; -bad: - Py_XDECREF(cached_type); - cached_type = NULL; - goto done; -} -#endif - -/* PyVectorcallFastCallDict */ -#if CYTHON_METH_FASTCALL -static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) -{ - PyObject *res = NULL; - PyObject *kwnames; - PyObject **newargs; - PyObject **kwvalues; - Py_ssize_t i, pos; - size_t j; - PyObject *key, *value; - unsigned long keys_are_strings; - Py_ssize_t nkw = PyDict_GET_SIZE(kw); - newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0])); - if (unlikely(newargs == NULL)) { - PyErr_NoMemory(); - return NULL; - } - for (j = 0; j < nargs; j++) newargs[j] = args[j]; - kwnames = PyTuple_New(nkw); - if (unlikely(kwnames == NULL)) { - PyMem_Free(newargs); - return NULL; - } - kwvalues = newargs + nargs; - pos = i = 0; - keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS; - while (PyDict_Next(kw, &pos, &key, &value)) { - keys_are_strings &= Py_TYPE(key)->tp_flags; - Py_INCREF(key); - Py_INCREF(value); - PyTuple_SET_ITEM(kwnames, i, key); - kwvalues[i] = value; - i++; - } - if (unlikely(!keys_are_strings)) { - PyErr_SetString(PyExc_TypeError, 
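Note: __Pyx_FetchCommonType caches shared helper types in a hidden ABI module so that every Cython extension in the process reuses one type object (after a size check). A conceptual Python sketch with a hypothetical module name:

import sys
import types

def fetch_common_type(cls, abi_name='_demo_cython_abi'):
    # One class object per process, keyed by name in a shared module
    abi = sys.modules.setdefault(abi_name, types.ModuleType(abi_name))
    cached = getattr(abi, cls.__name__, None)
    if cached is not None:
        return cached   # the C version also verifies tp_basicsize here
    setattr(abi, cls.__name__, cls)
    return cls

A = type('Shared', (), {})
B = type('Shared', (), {})
assert fetch_common_type(A) is fetch_common_type(B)  # the second caller gets the first type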
"keywords must be strings"); - goto cleanup; - } - res = vc(func, newargs, nargs, kwnames); -cleanup: - Py_DECREF(kwnames); - for (i = 0; i < nkw; i++) - Py_DECREF(kwvalues[i]); - PyMem_Free(newargs); - return res; -} -static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) -{ - if (likely(kw == NULL) || PyDict_GET_SIZE(kw) == 0) { - return vc(func, args, nargs, NULL); - } - return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw); -} -#endif - -/* CythonFunctionShared */ -static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) { -#if PY_VERSION_HEX < 0x030900B1 - __Pyx_Py_XDECREF_SET( - __Pyx_CyFunction_GetClassObj(f), - ((classobj) ? __Pyx_NewRef(classobj) : NULL)); -#else - __Pyx_Py_XDECREF_SET( - ((PyCMethodObject *) (f))->mm_class, - (PyTypeObject*)((classobj) ? __Pyx_NewRef(classobj) : NULL)); -#endif -} -static PyObject * -__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) -{ - CYTHON_UNUSED_VAR(closure); - if (unlikely(op->func_doc == NULL)) { - if (((PyCFunctionObject*)op)->m_ml->ml_doc) { -#if PY_MAJOR_VERSION >= 3 - op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); -#else - op->func_doc = PyString_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); -#endif - if (unlikely(op->func_doc == NULL)) - return NULL; - } else { - Py_INCREF(Py_None); - return Py_None; - } - } - Py_INCREF(op->func_doc); - return op->func_doc; -} -static int -__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (value == NULL) { - value = Py_None; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_doc, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(op->func_name == NULL)) { -#if PY_MAJOR_VERSION >= 3 - op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); -#else - op->func_name = PyString_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); -#endif - if (unlikely(op->func_name == NULL)) - return NULL; - } - Py_INCREF(op->func_name); - return op->func_name; -} -static int -__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__name__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_name, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - Py_INCREF(op->func_qualname); - return op->func_qualname; -} -static int -__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__qualname__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_qualname, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context) -{ - 
CYTHON_UNUSED_VAR(context); - if (unlikely(op->func_dict == NULL)) { - op->func_dict = PyDict_New(); - if (unlikely(op->func_dict == NULL)) - return NULL; - } - Py_INCREF(op->func_dict); - return op->func_dict; -} -static int -__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(value == NULL)) { - PyErr_SetString(PyExc_TypeError, - "function's dictionary may not be deleted"); - return -1; - } - if (unlikely(!PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "setting function's dictionary to a non-dict"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_dict, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - Py_INCREF(op->func_globals); - return op->func_globals; -} -static PyObject * -__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(op); - CYTHON_UNUSED_VAR(context); - Py_INCREF(Py_None); - return Py_None; -} -static PyObject * -__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context) -{ - PyObject* result = (op->func_code) ? op->func_code : Py_None; - CYTHON_UNUSED_VAR(context); - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { - int result = 0; - PyObject *res = op->defaults_getter((PyObject *) op); - if (unlikely(!res)) - return -1; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - op->defaults_tuple = PyTuple_GET_ITEM(res, 0); - Py_INCREF(op->defaults_tuple); - op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); - Py_INCREF(op->defaults_kwdict); - #else - op->defaults_tuple = PySequence_ITEM(res, 0); - if (unlikely(!op->defaults_tuple)) result = -1; - else { - op->defaults_kwdict = PySequence_ITEM(res, 1); - if (unlikely(!op->defaults_kwdict)) result = -1; - } - #endif - Py_DECREF(res); - return result; -} -static int -__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value) { - value = Py_None; - } else if (unlikely(value != Py_None && !PyTuple_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__defaults__ must be set to a tuple object"); - return -1; - } - PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not " - "currently affect the values used in function calls", 1); - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->defaults_tuple, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->defaults_tuple; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - if (op->defaults_getter) { - if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; - result = op->defaults_tuple; - } else { - result = Py_None; - } - } - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value) { - value = Py_None; - } else if (unlikely(value != Py_None && !PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__kwdefaults__ must be set to a dict object"); - return -1; - } - PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__kwdefaults__ will not " - "currently affect the values used in function calls", 1); - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value); - return 0; -} -static PyObject * 
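One detail of the __defaults__/__kwdefaults__ setters above: they ignore the return value of PyErr_WarnEx, which is -1 when warnings are configured as errors (e.g. python -W error). A stricter sketch of the same call would propagate that failure:

/* fragment of a hypothetical stricter setter, not the generated code */
if (PyErr_WarnEx(PyExc_RuntimeWarning,
                 "changes to cyfunction.__defaults__ will not "
                 "currently affect the values used in function calls", 1) < 0)
    return -1;   /* warning was promoted to an exception: propagate it */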
-__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->defaults_kwdict; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - if (op->defaults_getter) { - if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; - result = op->defaults_kwdict; - } else { - result = Py_None; - } - } - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value || value == Py_None) { - value = NULL; - } else if (unlikely(!PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__annotations__ must be set to a dict object"); - return -1; - } - Py_XINCREF(value); - __Pyx_Py_XDECREF_SET(op->func_annotations, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->func_annotations; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - result = PyDict_New(); - if (unlikely(!result)) return NULL; - op->func_annotations = result; - } - Py_INCREF(result); - return result; -} -static PyObject * -__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) { - int is_coroutine; - CYTHON_UNUSED_VAR(context); - if (op->func_is_coroutine) { - return __Pyx_NewRef(op->func_is_coroutine); - } - is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE; -#if PY_VERSION_HEX >= 0x03050000 - if (is_coroutine) { - PyObject *module, *fromlist, *marker = __pyx_n_s_is_coroutine; - fromlist = PyList_New(1); - if (unlikely(!fromlist)) return NULL; - Py_INCREF(marker); - PyList_SET_ITEM(fromlist, 0, marker); - module = PyImport_ImportModuleLevelObject(__pyx_n_s_asyncio_coroutines, NULL, NULL, fromlist, 0); - Py_DECREF(fromlist); - if (unlikely(!module)) goto ignore; - op->func_is_coroutine = __Pyx_PyObject_GetAttrStr(module, marker); - Py_DECREF(module); - if (likely(op->func_is_coroutine)) { - return __Pyx_NewRef(op->func_is_coroutine); - } -ignore: - PyErr_Clear(); - } -#endif - op->func_is_coroutine = __Pyx_PyBool_FromLong(is_coroutine); - return __Pyx_NewRef(op->func_is_coroutine); -} -static PyGetSetDef __pyx_CyFunction_getsets[] = { - {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, - {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, - {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, - {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, - {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, - {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, - {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, - {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, - {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, - {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, - {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, - {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, - {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, - {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, - {(char *) "__defaults__", 
(getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, - {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, - {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, - {(char *) "_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0}, - {0, 0, 0, 0, 0} -}; -static PyMemberDef __pyx_CyFunction_members[] = { - {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0}, -#if CYTHON_USE_TYPE_SPECS - {(char *) "__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0}, -#if CYTHON_METH_FASTCALL -#if CYTHON_BACKPORT_VECTORCALL - {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0}, -#else - {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(PyCFunctionObject, vectorcall), READONLY, 0}, -#endif -#endif -#if PY_VERSION_HEX < 0x030500A0 - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0}, -#else - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0}, -#endif -#endif - {0, 0, 0, 0, 0} -}; -static PyObject * -__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args) -{ - CYTHON_UNUSED_VAR(args); -#if PY_MAJOR_VERSION >= 3 - Py_INCREF(m->func_qualname); - return m->func_qualname; -#else - return PyString_FromString(((PyCFunctionObject*)m)->m_ml->ml_name); -#endif -} -static PyMethodDef __pyx_CyFunction_methods[] = { - {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, - {0, 0, 0, 0} -}; -#if PY_VERSION_HEX < 0x030500A0 -#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) -#else -#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist) -#endif -static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, - PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { - PyCFunctionObject *cf = (PyCFunctionObject*) op; - if (unlikely(op == NULL)) - return NULL; - op->flags = flags; - __Pyx_CyFunction_weakreflist(op) = NULL; - cf->m_ml = ml; - cf->m_self = (PyObject *) op; - Py_XINCREF(closure); - op->func_closure = closure; - Py_XINCREF(module); - cf->m_module = module; - op->func_dict = NULL; - op->func_name = NULL; - Py_INCREF(qualname); - op->func_qualname = qualname; - op->func_doc = NULL; -#if PY_VERSION_HEX < 0x030900B1 - op->func_classobj = NULL; -#else - ((PyCMethodObject*)op)->mm_class = NULL; -#endif - op->func_globals = globals; - Py_INCREF(op->func_globals); - Py_XINCREF(code); - op->func_code = code; - op->defaults_pyobjects = 0; - op->defaults_size = 0; - op->defaults = NULL; - op->defaults_tuple = NULL; - op->defaults_kwdict = NULL; - op->defaults_getter = NULL; - op->func_annotations = NULL; - op->func_is_coroutine = NULL; -#if CYTHON_METH_FASTCALL - switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) { - case METH_NOARGS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS; - break; - case METH_O: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O; - break; - case METH_METHOD | METH_FASTCALL | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD; - break; - case METH_FASTCALL | METH_KEYWORDS: - 
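The switch here selects one vectorcall implementation per ml_flags combination. For reference, the C calling conventions behind the cases look roughly like this (the FASTCALL conventions exist on CPython 3.7+, METH_METHOD on 3.9+); the typedef names are illustrative only:

typedef PyObject *(*noargs_f)(PyObject *self, PyObject *ignored);        /* METH_NOARGS */
typedef PyObject *(*onearg_f)(PyObject *self, PyObject *arg);            /* METH_O */
typedef PyObject *(*fastkw_f)(PyObject *self, PyObject *const *args,
                              Py_ssize_t nargs, PyObject *kwnames);      /* METH_FASTCALL | METH_KEYWORDS */
typedef PyObject *(*method_f)(PyObject *self, PyTypeObject *defining_cls,
                              PyObject *const *args, Py_ssize_t nargs,
                              PyObject *kwnames);                        /* METH_METHOD | METH_FASTCALL | METH_KEYWORDS */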
__Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS; - break; - case METH_VARARGS | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = NULL; - break; - default: - PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); - Py_DECREF(op); - return NULL; - } -#endif - return (PyObject *) op; -} -static int -__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) -{ - Py_CLEAR(m->func_closure); - Py_CLEAR(((PyCFunctionObject*)m)->m_module); - Py_CLEAR(m->func_dict); - Py_CLEAR(m->func_name); - Py_CLEAR(m->func_qualname); - Py_CLEAR(m->func_doc); - Py_CLEAR(m->func_globals); - Py_CLEAR(m->func_code); -#if PY_VERSION_HEX < 0x030900B1 - Py_CLEAR(__Pyx_CyFunction_GetClassObj(m)); -#else - { - PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class; - ((PyCMethodObject *) (m))->mm_class = NULL; - Py_XDECREF(cls); - } -#endif - Py_CLEAR(m->defaults_tuple); - Py_CLEAR(m->defaults_kwdict); - Py_CLEAR(m->func_annotations); - Py_CLEAR(m->func_is_coroutine); - if (m->defaults) { - PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); - int i; - for (i = 0; i < m->defaults_pyobjects; i++) - Py_XDECREF(pydefaults[i]); - PyObject_Free(m->defaults); - m->defaults = NULL; - } - return 0; -} -static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) -{ - if (__Pyx_CyFunction_weakreflist(m) != NULL) - PyObject_ClearWeakRefs((PyObject *) m); - __Pyx_CyFunction_clear(m); - __Pyx_PyHeapTypeObject_GC_Del(m); -} -static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) -{ - PyObject_GC_UnTrack(m); - __Pyx__CyFunction_dealloc(m); -} -static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) -{ - Py_VISIT(m->func_closure); - Py_VISIT(((PyCFunctionObject*)m)->m_module); - Py_VISIT(m->func_dict); - Py_VISIT(m->func_name); - Py_VISIT(m->func_qualname); - Py_VISIT(m->func_doc); - Py_VISIT(m->func_globals); - Py_VISIT(m->func_code); - Py_VISIT(__Pyx_CyFunction_GetClassObj(m)); - Py_VISIT(m->defaults_tuple); - Py_VISIT(m->defaults_kwdict); - Py_VISIT(m->func_is_coroutine); - if (m->defaults) { - PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); - int i; - for (i = 0; i < m->defaults_pyobjects; i++) - Py_VISIT(pydefaults[i]); - } - return 0; -} -static PyObject* -__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) -{ -#if PY_MAJOR_VERSION >= 3 - return PyUnicode_FromFormat("<cyfunction %U at %p>", - op->func_qualname, (void *)op); -#else - return PyString_FromFormat("<cyfunction %s at %p>", - PyString_AsString(op->func_qualname), (void *)op); -#endif -} -static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { - PyCFunctionObject* f = (PyCFunctionObject*)func; - PyCFunction meth = f->m_ml->ml_meth; - Py_ssize_t size; - switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { - case METH_VARARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) - return (*meth)(self, arg); - break; - case METH_VARARGS | METH_KEYWORDS: - return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); - case METH_NOARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { - size = PyTuple_GET_SIZE(arg); - if (likely(size == 0)) - return (*meth)(self, NULL); - PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", - f->m_ml->ml_name, size); - return NULL; - } - break; - case METH_O: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { - size = PyTuple_GET_SIZE(arg); - if (likely(size == 1)) { - PyObject *result, *arg0; - #if 
CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - arg0 = PyTuple_GET_ITEM(arg, 0); - #else - arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; - #endif - result = (*meth)(self, arg0); - #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(arg0); - #endif - return result; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", - f->m_ml->ml_name, size); - return NULL; - } - break; - default: - PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); - return NULL; - } - PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", - f->m_ml->ml_name); - return NULL; -} -static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { - return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); -} -static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { - PyObject *result; - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; -#if CYTHON_METH_FASTCALL - __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc); - if (vc) { -#if CYTHON_ASSUME_SAFE_MACROS - return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw); -#else - (void) &__Pyx_PyVectorcall_FastCallDict; - return PyVectorcall_Call(func, args, kw); -#endif - } -#endif - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - Py_ssize_t argc; - PyObject *new_args; - PyObject *self; - argc = PyTuple_GET_SIZE(args); - new_args = PyTuple_GetSlice(args, 1, argc); - if (unlikely(!new_args)) - return NULL; - self = PyTuple_GetItem(args, 0); - if (unlikely(!self)) { - Py_DECREF(new_args); - return NULL; - } - result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); - Py_DECREF(new_args); - } else { - result = __Pyx_CyFunction_Call(func, args, kw); - } - return result; -} -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames) -{ - int ret = 0; - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - if (unlikely(nargs < 1)) { - PyErr_Format(PyExc_TypeError, "%.200s() needs an argument", - ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); - return -1; - } - ret = 1; - } - if (unlikely(kwnames) && unlikely(PyTuple_GET_SIZE(kwnames))) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes no keyword arguments", ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); - return -1; - } - return ret; -} -static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - if (unlikely(nargs != 0)) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", - def->ml_name, nargs); - return NULL; - } - return def->ml_meth(self, NULL); -} -static PyObject * 
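The Vectorcall_* implementations above are targets for the vectorcall protocol. A type opts in by storing a function pointer in each instance and recording where it lives; a minimal sketch for CPython 3.9+ with a hypothetical DemoObject (instances must assign op->vc before being called):

#include <Python.h>

typedef struct {
    PyObject_HEAD
    vectorcallfunc vc;               /* per-instance call target */
} DemoObject;

static PyTypeObject DemoType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "demo.Demo",
    .tp_basicsize = sizeof(DemoObject),
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_VECTORCALL,
    .tp_vectorcall_offset = offsetof(DemoObject, vc),
    .tp_call = PyVectorcall_Call,    /* route classic tp_call through the same pointer */
};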
__Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - if (unlikely(nargs != 1)) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", - def->ml_name, nargs); - return NULL; - } - return def->ml_meth(self, args[0]); -} -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - return ((_PyCFunctionFastWithKeywords)(void(*)(void))def->ml_meth)(self, args, nargs, kwnames); -} -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; - PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc); -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - return ((__Pyx_PyCMethod)(void(*)(void))def->ml_meth)(self, cls, args, nargs, kwnames); -} -#endif -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_CyFunctionType_slots[] = { - {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc}, - {Py_tp_repr, (void *)__Pyx_CyFunction_repr}, - {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod}, - {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse}, - {Py_tp_clear, (void *)__Pyx_CyFunction_clear}, - {Py_tp_methods, (void *)__pyx_CyFunction_methods}, - {Py_tp_members, (void *)__pyx_CyFunction_members}, - {Py_tp_getset, (void *)__pyx_CyFunction_getsets}, - {Py_tp_descr_get, (void *)__Pyx_PyMethod_New}, - {0, 0}, -}; -static PyType_Spec __pyx_CyFunctionType_spec = { - __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", - sizeof(__pyx_CyFunctionObject), - 0, -#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR - Py_TPFLAGS_METHOD_DESCRIPTOR | -#endif -#if (defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL) - _Py_TPFLAGS_HAVE_VECTORCALL | -#endif - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, - __pyx_CyFunctionType_slots -}; -#else -static PyTypeObject __pyx_CyFunctionType_type = { - PyVarObject_HEAD_INIT(0, 0) - __PYX_TYPE_MODULE_PREFIX 
"cython_function_or_method", - sizeof(__pyx_CyFunctionObject), - 0, - (destructor) __Pyx_CyFunction_dealloc, -#if !CYTHON_METH_FASTCALL - 0, -#elif CYTHON_BACKPORT_VECTORCALL - (printfunc)offsetof(__pyx_CyFunctionObject, func_vectorcall), -#else - offsetof(PyCFunctionObject, vectorcall), -#endif - 0, - 0, -#if PY_MAJOR_VERSION < 3 - 0, -#else - 0, -#endif - (reprfunc) __Pyx_CyFunction_repr, - 0, - 0, - 0, - 0, - __Pyx_CyFunction_CallAsMethod, - 0, - 0, - 0, - 0, -#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR - Py_TPFLAGS_METHOD_DESCRIPTOR | -#endif -#ifdef _Py_TPFLAGS_HAVE_VECTORCALL - _Py_TPFLAGS_HAVE_VECTORCALL | -#endif - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, - 0, - (traverseproc) __Pyx_CyFunction_traverse, - (inquiry) __Pyx_CyFunction_clear, - 0, -#if PY_VERSION_HEX < 0x030500A0 - offsetof(__pyx_CyFunctionObject, func_weakreflist), -#else - offsetof(PyCFunctionObject, m_weakreflist), -#endif - 0, - 0, - __pyx_CyFunction_methods, - __pyx_CyFunction_members, - __pyx_CyFunction_getsets, - 0, - 0, - __Pyx_PyMethod_New, - 0, - offsetof(__pyx_CyFunctionObject, func_dict), - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -#if PY_VERSION_HEX >= 0x030400a1 - 0, -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, -#endif -}; -#endif -static int __pyx_CyFunction_init(PyObject *module) { -#if CYTHON_USE_TYPE_SPECS - __pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CyFunctionType_spec, NULL); -#else - (void) module; - __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); -#endif - if (unlikely(__pyx_CyFunctionType == NULL)) { - return -1; - } - return 0; -} -static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults = PyObject_Malloc(size); - if (unlikely(!m->defaults)) - return PyErr_NoMemory(); - memset(m->defaults, 0, size); - m->defaults_pyobjects = pyobjects; - m->defaults_size = size; - return m->defaults; -} -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults_tuple = tuple; - Py_INCREF(tuple); -} -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults_kwdict = dict; - Py_INCREF(dict); -} -static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->func_annotations = dict; - Py_INCREF(dict); -} - -/* CythonFunction */ -static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, - PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { - PyObject *op = __Pyx_CyFunction_Init( - PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), - ml, flags, qualname, closure, module, globals, code - ); - if (likely(op)) { - PyObject_GC_Track(op); - } - return op; -} - -/* GetAttr */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { -#if CYTHON_USE_TYPE_SLOTS -#if PY_MAJOR_VERSION >= 3 - if (likely(PyUnicode_Check(n))) -#else - if (likely(PyString_Check(n))) -#endif - return __Pyx_PyObject_GetAttrStr(o, n); 
-#endif - return PyObject_GetAttr(o, n); -} - -/* IterFinish */ -static CYTHON_INLINE int __Pyx_IterFinish(void) { -#if CYTHON_FAST_THREAD_STATE - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* exc_type = tstate->curexc_type; - if (unlikely(exc_type)) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { - PyObject *exc_value, *exc_tb; - exc_value = tstate->curexc_value; - exc_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; - Py_DECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_tb); - return 0; - } else { - return -1; - } - } - return 0; -#else - if (unlikely(PyErr_Occurred())) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { - PyErr_Clear(); - return 0; - } else { - return -1; - } - } - return 0; -#endif -} - -/* PyObjectCallNoArg */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { - PyObject *arg = NULL; - return __Pyx_PyObject_FastCall(func, (&arg)+1, 0 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* PyObjectGetMethod */ -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { - PyObject *attr; -#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP - __Pyx_TypeName type_name; - PyTypeObject *tp = Py_TYPE(obj); - PyObject *descr; - descrgetfunc f = NULL; - PyObject **dictptr, *dict; - int meth_found = 0; - assert (*method == NULL); - if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; - } - if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { - return 0; - } - descr = _PyType_Lookup(tp, name); - if (likely(descr != NULL)) { - Py_INCREF(descr); -#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR - if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) -#elif PY_MAJOR_VERSION >= 3 - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type))) - #endif -#else - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr))) - #endif -#endif - { - meth_found = 1; - } else { - f = Py_TYPE(descr)->tp_descr_get; - if (f != NULL && PyDescr_IsData(descr)) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - } - } - dictptr = _PyObject_GetDictPtr(obj); - if (dictptr != NULL && (dict = *dictptr) != NULL) { - Py_INCREF(dict); - attr = __Pyx_PyDict_GetItemStr(dict, name); - if (attr != NULL) { - Py_INCREF(attr); - Py_DECREF(dict); - Py_XDECREF(descr); - goto try_unpack; - } - Py_DECREF(dict); - } - if (meth_found) { - *method = descr; - return 1; - } - if (f != NULL) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - if (likely(descr != NULL)) { - *method = descr; - return 0; - } - type_name = __Pyx_PyType_GetName(tp); - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", - type_name, name); -#else - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", - type_name, PyString_AS_STRING(name)); -#endif - __Pyx_DECREF_TypeName(type_name); - return 0; -#else - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; 
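__Pyx_PyObject_GetMethod above replicates CPython's LOAD_METHOD optimization: fetch the underlying function and pass the receiver explicitly rather than allocating a bound-method object. CPython 3.9+ exposes the same shortcut publicly; a sketch with a hypothetical method name "update":

#include <Python.h>

static PyObject *call_method_fast(PyObject *obj, PyObject *x) {
    PyObject *name = PyUnicode_InternFromString("update");  /* hypothetical method name */
    PyObject *args[2];
    PyObject *res;
    if (name == NULL)
        return NULL;
    args[0] = obj;   /* the receiver goes first in the argument stack */
    args[1] = x;
    res = PyObject_VectorcallMethod(name, args, 2, NULL);   /* no bound method created */
    Py_DECREF(name);
    return res;
}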
-#endif -try_unpack: -#if CYTHON_UNPACK_METHODS - if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { - PyObject *function = PyMethod_GET_FUNCTION(attr); - Py_INCREF(function); - Py_DECREF(attr); - *method = function; - return 1; - } -#endif - *method = attr; - return 0; -} - -/* PyObjectCallMethod0 */ -static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { - PyObject *method = NULL, *result = NULL; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_CallOneArg(method, obj); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) goto bad; - result = __Pyx_PyObject_CallNoArg(method); - Py_DECREF(method); -bad: - return result; -} - -/* RaiseNeedMoreValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? "" : "s"); -} - -/* RaiseTooManyValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/* UnpackItemEndCheck */ -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } - return __Pyx_IterFinish(); -} - -/* RaiseNoneIterError */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -/* UnpackTupleError */ -static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { - if (t == Py_None) { - __Pyx_RaiseNoneNotIterableError(); - } else if (PyTuple_GET_SIZE(t) < index) { - __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); - } else { - __Pyx_RaiseTooManyValuesError(index); - } -} - -/* UnpackTuple2 */ -static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( - PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) { - PyObject *value1 = NULL, *value2 = NULL; -#if CYTHON_COMPILING_IN_PYPY - value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad; - value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad; -#else - value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1); - value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2); -#endif - if (decref_tuple) { - Py_DECREF(tuple); - } - *pvalue1 = value1; - *pvalue2 = value2; - return 0; -#if CYTHON_COMPILING_IN_PYPY -bad: - Py_XDECREF(value1); - Py_XDECREF(value2); - if (decref_tuple) { Py_XDECREF(tuple); } - return -1; -#endif -} -static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, - int has_known_size, int decref_tuple) { - Py_ssize_t index; - PyObject *value1 = NULL, *value2 = NULL, *iter = NULL; - iternextfunc iternext; - iter = PyObject_GetIter(tuple); - if (unlikely(!iter)) goto bad; - if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; } - iternext = __Pyx_PyObject_GetIterNextFunc(iter); - value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; } - value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; } - if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad; - Py_DECREF(iter); - *pvalue1 = value1; - *pvalue2 = value2; - return 0; -unpacking_failed: - if (!has_known_size && 
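The unpacking helpers above all funnel through __Pyx_IterFinish, which encodes the tp_iternext contract: NULL means clean exhaustion when no exception (or only StopIteration) is pending, and a real failure otherwise. The same rule with plain C API calls, assuming it is already an iterator (tp_iternext != NULL):

#include <Python.h>

static int drain(PyObject *it) {
    iternextfunc next = Py_TYPE(it)->tp_iternext;
    PyObject *item;
    while ((item = next(it)) != NULL)
        Py_DECREF(item);                       /* consume and discard */
    if (PyErr_Occurred()) {
        if (!PyErr_ExceptionMatches(PyExc_StopIteration))
            return -1;                         /* genuine error: propagate */
        PyErr_Clear();                         /* StopIteration == clean end */
    }
    return 0;
}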
__Pyx_IterFinish() == 0) - __Pyx_RaiseNeedMoreValuesError(index); -bad: - Py_XDECREF(iter); - Py_XDECREF(value1); - Py_XDECREF(value2); - if (decref_tuple) { Py_XDECREF(tuple); } - return -1; -} - -/* dict_iter */ -static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name, - Py_ssize_t* p_orig_length, int* p_source_is_dict) { - is_dict = is_dict || likely(PyDict_CheckExact(iterable)); - *p_source_is_dict = is_dict; - if (is_dict) { -#if !CYTHON_COMPILING_IN_PYPY - *p_orig_length = PyDict_Size(iterable); - Py_INCREF(iterable); - return iterable; -#elif PY_MAJOR_VERSION >= 3 - static PyObject *py_items = NULL, *py_keys = NULL, *py_values = NULL; - PyObject **pp = NULL; - if (method_name) { - const char *name = PyUnicode_AsUTF8(method_name); - if (strcmp(name, "iteritems") == 0) pp = &py_items; - else if (strcmp(name, "iterkeys") == 0) pp = &py_keys; - else if (strcmp(name, "itervalues") == 0) pp = &py_values; - if (pp) { - if (!*pp) { - *pp = PyUnicode_FromString(name + 4); - if (!*pp) - return NULL; - } - method_name = *pp; - } - } -#endif - } - *p_orig_length = 0; - if (method_name) { - PyObject* iter; - iterable = __Pyx_PyObject_CallMethod0(iterable, method_name); - if (!iterable) - return NULL; -#if !CYTHON_COMPILING_IN_PYPY - if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable)) - return iterable; -#endif - iter = PyObject_GetIter(iterable); - Py_DECREF(iterable); - return iter; - } - return PyObject_GetIter(iterable); -} -static CYTHON_INLINE int __Pyx_dict_iter_next( - PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos, - PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) { - PyObject* next_item; -#if !CYTHON_COMPILING_IN_PYPY - if (source_is_dict) { - PyObject *key, *value; - if (unlikely(orig_length != PyDict_Size(iter_obj))) { - PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration"); - return -1; - } - if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) { - return 0; - } - if (pitem) { - PyObject* tuple = PyTuple_New(2); - if (unlikely(!tuple)) { - return -1; - } - Py_INCREF(key); - Py_INCREF(value); - PyTuple_SET_ITEM(tuple, 0, key); - PyTuple_SET_ITEM(tuple, 1, value); - *pitem = tuple; - } else { - if (pkey) { - Py_INCREF(key); - *pkey = key; - } - if (pvalue) { - Py_INCREF(value); - *pvalue = value; - } - } - return 1; - } else if (PyTuple_CheckExact(iter_obj)) { - Py_ssize_t pos = *ppos; - if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0; - *ppos = pos + 1; - next_item = PyTuple_GET_ITEM(iter_obj, pos); - Py_INCREF(next_item); - } else if (PyList_CheckExact(iter_obj)) { - Py_ssize_t pos = *ppos; - if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0; - *ppos = pos + 1; - next_item = PyList_GET_ITEM(iter_obj, pos); - Py_INCREF(next_item); - } else -#endif - { - next_item = PyIter_Next(iter_obj); - if (unlikely(!next_item)) { - return __Pyx_IterFinish(); - } - } - if (pitem) { - *pitem = next_item; - } else if (pkey && pvalue) { - if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1)) - return -1; - } else if (pkey) { - *pkey = next_item; - } else { - *pvalue = next_item; - } - return 1; -} - -/* JoinPyUnicode */ -static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, - Py_UCS4 max_char) { -#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - PyObject *result_uval; - int 
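__Pyx_dict_iter_next above is the inlined form of a guarded PyDict_Next loop. The essential pattern, with plain C API calls:

#include <Python.h>

static int visit_items(PyObject *d) {
    Py_ssize_t pos = 0, orig = PyDict_Size(d);
    PyObject *key, *value;                      /* borrowed references */
    while (PyDict_Next(d, &pos, &key, &value)) {
        if (PyDict_Size(d) != orig) {           /* mutation guard, as above */
            PyErr_SetString(PyExc_RuntimeError,
                            "dictionary changed size during iteration");
            return -1;
        }
        /* use key/value here; INCREF them before running arbitrary Python code */
    }
    return 0;
}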
result_ukind, kind_shift; - Py_ssize_t i, char_pos; - void *result_udata; - CYTHON_MAYBE_UNUSED_VAR(max_char); -#if CYTHON_PEP393_ENABLED - result_uval = PyUnicode_New(result_ulength, max_char); - if (unlikely(!result_uval)) return NULL; - result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND; - kind_shift = (result_ukind == PyUnicode_4BYTE_KIND) ? 2 : result_ukind - 1; - result_udata = PyUnicode_DATA(result_uval); -#else - result_uval = PyUnicode_FromUnicode(NULL, result_ulength); - if (unlikely(!result_uval)) return NULL; - result_ukind = sizeof(Py_UNICODE); - kind_shift = (result_ukind == 4) ? 2 : result_ukind - 1; - result_udata = PyUnicode_AS_UNICODE(result_uval); -#endif - assert(kind_shift == 2 || kind_shift == 1 || kind_shift == 0); - char_pos = 0; - for (i=0; i < value_count; i++) { - int ukind; - Py_ssize_t ulength; - void *udata; - PyObject *uval = PyTuple_GET_ITEM(value_tuple, i); - if (unlikely(__Pyx_PyUnicode_READY(uval))) - goto bad; - ulength = __Pyx_PyUnicode_GET_LENGTH(uval); - if (unlikely(!ulength)) - continue; - if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - ulength < char_pos)) - goto overflow; - ukind = __Pyx_PyUnicode_KIND(uval); - udata = __Pyx_PyUnicode_DATA(uval); - if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) { - memcpy((char *)result_udata + (char_pos << kind_shift), udata, (size_t) (ulength << kind_shift)); - } else { - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030300F0 || defined(_PyUnicode_FastCopyCharacters) - _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength); - #else - Py_ssize_t j; - for (j=0; j < ulength; j++) { - Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j); - __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar); - } - #endif - } - char_pos += ulength; - } - return result_uval; -overflow: - PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string"); -bad: - Py_DECREF(result_uval); - return NULL; -#else - CYTHON_UNUSED_VAR(max_char); - CYTHON_UNUSED_VAR(result_ulength); - CYTHON_UNUSED_VAR(value_count); - return PyUnicode_Join(__pyx_empty_unicode, value_tuple); -#endif -} - -/* pep479 */ -static void __Pyx_Generator_Replace_StopIteration(int in_async_gen) { - PyObject *exc, *val, *tb, *cur_exc; - __Pyx_PyThreadState_declare - #ifdef __Pyx_StopAsyncIteration_USED - int is_async_stopiteration = 0; - #endif - CYTHON_MAYBE_UNUSED_VAR(in_async_gen); - cur_exc = PyErr_Occurred(); - if (likely(!__Pyx_PyErr_GivenExceptionMatches(cur_exc, PyExc_StopIteration))) { - #ifdef __Pyx_StopAsyncIteration_USED - if (in_async_gen && unlikely(__Pyx_PyErr_GivenExceptionMatches(cur_exc, __Pyx_PyExc_StopAsyncIteration))) { - is_async_stopiteration = 1; - } else - #endif - return; - } - __Pyx_PyThreadState_assign - __Pyx_GetException(&exc, &val, &tb); - Py_XDECREF(exc); - Py_XDECREF(val); - Py_XDECREF(tb); - PyErr_SetString(PyExc_RuntimeError, - #ifdef __Pyx_StopAsyncIteration_USED - is_async_stopiteration ? "async generator raised StopAsyncIteration" : - in_async_gen ? 
"async generator raised StopIteration" : - #endif - "generator raised StopIteration"); -} - -/* UnicodeConcatInPlace */ -# if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 -static int -__Pyx_unicode_modifiable(PyObject *unicode) -{ - if (Py_REFCNT(unicode) != 1) - return 0; - if (!PyUnicode_CheckExact(unicode)) - return 0; - if (PyUnicode_CHECK_INTERNED(unicode)) - return 0; - return 1; -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_ConcatInPlaceImpl(PyObject **p_left, PyObject *right - #if CYTHON_REFNANNY - , void* __pyx_refnanny - #endif - ) { - PyObject *left = *p_left; - Py_ssize_t left_len, right_len, new_len; - if (unlikely(__Pyx_PyUnicode_READY(left) == -1)) - return NULL; - if (unlikely(__Pyx_PyUnicode_READY(right) == -1)) - return NULL; - left_len = PyUnicode_GET_LENGTH(left); - if (left_len == 0) { - Py_INCREF(right); - return right; - } - right_len = PyUnicode_GET_LENGTH(right); - if (right_len == 0) { - Py_INCREF(left); - return left; - } - if (unlikely(left_len > PY_SSIZE_T_MAX - right_len)) { - PyErr_SetString(PyExc_OverflowError, - "strings are too large to concat"); - return NULL; - } - new_len = left_len + right_len; - if (__Pyx_unicode_modifiable(left) - && PyUnicode_CheckExact(right) - && PyUnicode_KIND(right) <= PyUnicode_KIND(left) - && !(PyUnicode_IS_ASCII(left) && !PyUnicode_IS_ASCII(right))) { - __Pyx_GIVEREF(*p_left); - if (unlikely(PyUnicode_Resize(p_left, new_len) != 0)) { - __Pyx_GOTREF(*p_left); - return NULL; - } - __Pyx_INCREF(*p_left); - _PyUnicode_FastCopyCharacters(*p_left, left_len, right, 0, right_len); - return *p_left; - } else { - return __Pyx_PyUnicode_Concat(left, right); - } - } -#endif - -/* Import */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *module = 0; - PyObject *empty_dict = 0; - PyObject *empty_list = 0; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (unlikely(!py_import)) - goto bad; - if (!from_list) { - empty_list = PyList_New(0); - if (unlikely(!empty_list)) - goto bad; - from_list = empty_list; - } - #endif - empty_dict = PyDict_New(); - if (unlikely(!empty_dict)) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - #if CYTHON_COMPILING_IN_LIMITED_API - module = PyImport_ImportModuleLevelObject( - name, empty_dict, empty_dict, from_list, 1); - #else - module = PyImport_ImportModuleLevelObject( - name, __pyx_d, empty_dict, from_list, 1); - #endif - if (unlikely(!module)) { - if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (unlikely(!py_level)) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, __pyx_d, empty_dict, from_list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - #if CYTHON_COMPILING_IN_LIMITED_API - module = PyImport_ImportModuleLevelObject( - name, empty_dict, empty_dict, from_list, level); - #else - module = PyImport_ImportModuleLevelObject( - name, __pyx_d, empty_dict, from_list, level); - #endif - #endif - } - } -bad: - Py_XDECREF(empty_dict); - Py_XDECREF(empty_list); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - return module; -} - -/* ImportDottedModule */ -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx__ImportDottedModule_Error(PyObject *name, PyObject *parts_tuple, Py_ssize_t count) { - PyObject 
*partial_name = NULL, *slice = NULL, *sep = NULL; - if (unlikely(PyErr_Occurred())) { - PyErr_Clear(); - } - if (likely(PyTuple_GET_SIZE(parts_tuple) == count)) { - partial_name = name; - } else { - slice = PySequence_GetSlice(parts_tuple, 0, count); - if (unlikely(!slice)) - goto bad; - sep = PyUnicode_FromStringAndSize(".", 1); - if (unlikely(!sep)) - goto bad; - partial_name = PyUnicode_Join(sep, slice); - } - PyErr_Format( -#if PY_MAJOR_VERSION < 3 - PyExc_ImportError, - "No module named '%s'", PyString_AS_STRING(partial_name)); -#else -#if PY_VERSION_HEX >= 0x030600B1 - PyExc_ModuleNotFoundError, -#else - PyExc_ImportError, -#endif - "No module named '%U'", partial_name); -#endif -bad: - Py_XDECREF(sep); - Py_XDECREF(slice); - Py_XDECREF(partial_name); - return NULL; -} -#endif -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx__ImportDottedModule_Lookup(PyObject *name) { - PyObject *imported_module; -#if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) - PyObject *modules = PyImport_GetModuleDict(); - if (unlikely(!modules)) - return NULL; - imported_module = __Pyx_PyDict_GetItemStr(modules, name); - Py_XINCREF(imported_module); -#else - imported_module = PyImport_GetModule(name); -#endif - return imported_module; -} -#endif -static PyObject *__Pyx__ImportDottedModule(PyObject *name, PyObject *parts_tuple) { -#if PY_MAJOR_VERSION < 3 - PyObject *module, *from_list, *star = __pyx_n_s__9; - CYTHON_UNUSED_VAR(parts_tuple); - from_list = PyList_New(1); - if (unlikely(!from_list)) - return NULL; - Py_INCREF(star); - PyList_SET_ITEM(from_list, 0, star); - module = __Pyx_Import(name, from_list, 0); - Py_DECREF(from_list); - return module; -#else - Py_ssize_t i, nparts; - PyObject *imported_module; - PyObject *module = __Pyx_Import(name, NULL, 0); - if (!parts_tuple || unlikely(!module)) - return module; - imported_module = __Pyx__ImportDottedModule_Lookup(name); - if (likely(imported_module)) { - Py_DECREF(module); - return imported_module; - } - PyErr_Clear(); - nparts = PyTuple_GET_SIZE(parts_tuple); - for (i=1; i < nparts && module; i++) { - PyObject *part, *submodule; -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - part = PyTuple_GET_ITEM(parts_tuple, i); -#else - part = PySequence_ITEM(parts_tuple, i); -#endif - submodule = __Pyx_PyObject_GetAttrStrNoError(module, part); -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(part); -#endif - Py_DECREF(module); - module = submodule; - } - if (likely(module)) - return module; - return __Pyx__ImportDottedModule_Error(name, parts_tuple, i); -#endif -} -static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple) { -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030400B1 - PyObject *module = __Pyx__ImportDottedModule_Lookup(name); - if (likely(module)) { - PyObject *spec = __Pyx_PyObject_GetAttrStrNoError(module, __pyx_n_s_spec); - if (likely(spec)) { - PyObject *unsafe = __Pyx_PyObject_GetAttrStrNoError(spec, __pyx_n_s_initializing); - if (likely(!unsafe || !__Pyx_PyObject_IsTrue(unsafe))) { - Py_DECREF(spec); - spec = NULL; - } - Py_XDECREF(unsafe); - } - if (likely(!spec)) { - PyErr_Clear(); - return module; - } - Py_DECREF(spec); - Py_DECREF(module); - } else if (PyErr_Occurred()) { - PyErr_Clear(); - } -#endif - return __Pyx__ImportDottedModule(name, parts_tuple); -} - -/* PyObjectSetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { - 
PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_setattro)) - return tp->tp_setattro(obj, attr_name, value); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_setattr)) - return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); -#endif - return PyObject_SetAttr(obj, attr_name, value); -} -#endif - -/* GetItemInt */ -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (unlikely(!j)) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
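When none of the list/tuple fast paths above apply, __Pyx_GetItemInt_Generic boxes the integer index and goes through the general subscript protocol, equivalent to:

#include <Python.h>

static PyObject *getitem_ssize(PyObject *o, Py_ssize_t i) {
    PyObject *key = PyLong_FromSsize_t(i);
    PyObject *r;
    if (key == NULL)
        return NULL;
    r = PyObject_GetItem(o, key);   /* honors mp_subscript, then sq_item */
    Py_DECREF(key);
    return r;
}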
i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; - PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; - if (mm && mm->mp_subscript) { - PyObject *r, *key = PyInt_FromSsize_t(i); - if (unlikely(!key)) return NULL; - r = mm->mp_subscript(o, key); - Py_DECREF(key); - return r; - } - if (likely(sm && sm->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { - Py_ssize_t l = sm->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return sm->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* ObjectGetItem */ -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject *index) { - PyObject *runerr; - Py_ssize_t key_value; - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - __Pyx_TypeName index_type_name = __Pyx_PyType_GetName(Py_TYPE(index)); - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, - "cannot fit '" __Pyx_FMT_TYPENAME "' into an index-sized integer", index_type_name); - __Pyx_DECREF_TypeName(index_type_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem_Slow(PyObject *obj, PyObject *key) { - __Pyx_TypeName obj_type_name; - if (likely(PyType_Check(obj))) { - PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(obj, __pyx_n_s_class_getitem); - if (meth) { - PyObject *result = __Pyx_PyObject_CallOneArg(meth, key); - Py_DECREF(meth); - return result; - } - } - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "'" __Pyx_FMT_TYPENAME "' object is not subscriptable", obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key) { - PyTypeObject *tp = Py_TYPE(obj); - PyMappingMethods *mm = tp->tp_as_mapping; - PySequenceMethods *sm = tp->tp_as_sequence; - if (likely(mm && mm->mp_subscript)) { - return mm->mp_subscript(obj, key); - } - if (likely(sm && sm->sq_item)) { - return __Pyx_PyObject_GetIndex(obj, key); - } - return __Pyx_PyObject_GetItem_Slow(obj, key); -} -#endif - -/* UnpackUnboundCMethod */ -static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { - PyObject *method; - method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); - if (unlikely(!method)) - return -1; - target->method = method; -#if CYTHON_COMPILING_IN_CPYTHON - #if PY_MAJOR_VERSION >= 3 - if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type))) - #endif - { - PyMethodDescrObject *descr = (PyMethodDescrObject*) method; - target->func = descr->d_method->ml_meth; - target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); - } -#endif - return 0; -} - -/* CallUnboundCMethod1 */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg) { - if (likely(cfunc->func)) { - int flag = cfunc->flag; - if (flag == METH_O) { - return (*(cfunc->func))(self, arg); - } else if 
((PY_VERSION_HEX >= 0x030600B1) && flag == METH_FASTCALL) { - if ((PY_VERSION_HEX >= 0x030700A0)) { - return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, &arg, 1); - } else { - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); - } - } else if ((PY_VERSION_HEX >= 0x030700A0) && flag == (METH_FASTCALL | METH_KEYWORDS)) { - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL); - } - } - return __Pyx__CallUnboundCMethod1(cfunc, self, arg); -} -#endif -static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){ - PyObject *args, *result = NULL; - if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; -#if CYTHON_COMPILING_IN_CPYTHON - if (cfunc->func && (cfunc->flag & METH_VARARGS)) { - args = PyTuple_New(1); - if (unlikely(!args)) goto bad; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - if (cfunc->flag & METH_KEYWORDS) - result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); - else - result = (*cfunc->func)(self, args); - } else { - args = PyTuple_New(2); - if (unlikely(!args)) goto bad; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 1, arg); - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); - } -#else - args = PyTuple_Pack(2, self, arg); - if (unlikely(!args)) goto bad; - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); -#endif -bad: - Py_XDECREF(args); - return result; -} - -/* CallUnboundCMethod2 */ -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1 -static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2) { - if (likely(cfunc->func)) { - PyObject *args[2] = {arg1, arg2}; - if (cfunc->flag == METH_FASTCALL) { - #if PY_VERSION_HEX >= 0x030700A0 - return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, args, 2); - #else - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); - #endif - } - #if PY_VERSION_HEX >= 0x030700A0 - if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS)) - return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL); - #endif - } - return __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2); -} -#endif -static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2){ - PyObject *args, *result = NULL; - if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; -#if CYTHON_COMPILING_IN_CPYTHON - if (cfunc->func && (cfunc->flag & METH_VARARGS)) { - args = PyTuple_New(2); - if (unlikely(!args)) goto bad; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - if (cfunc->flag & METH_KEYWORDS) - result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL); - else - result = (*cfunc->func)(self, args); - } else { - args = PyTuple_New(3); - if (unlikely(!args)) goto bad; - Py_INCREF(self); - PyTuple_SET_ITEM(args, 0, self); - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 1, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 2, arg2); - result = __Pyx_PyObject_Call(cfunc->method, args, NULL); - } -#else - args = PyTuple_Pack(3, self, arg1, arg2); - if (unlikely(!args)) goto bad; - result = 
__Pyx_PyObject_Call(cfunc->method, args, NULL); -#endif -bad: - Py_XDECREF(args); - return result; -} - -/* dict_getitem_default */ -static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value) { - PyObject* value; -#if PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) - value = PyDict_GetItemWithError(d, key); - if (unlikely(!value)) { - if (unlikely(PyErr_Occurred())) - return NULL; - value = default_value; - } - Py_INCREF(value); - if ((1)); -#else - if (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key)) { - value = PyDict_GetItem(d, key); - if (unlikely(!value)) { - value = default_value; - } - Py_INCREF(value); - } -#endif - else { - if (default_value == Py_None) - value = __Pyx_CallUnboundCMethod1(&__pyx_umethod_PyDict_Type_get, d, key); - else - value = __Pyx_CallUnboundCMethod2(&__pyx_umethod_PyDict_Type_get, d, key, default_value); - } - return value; -} - -/* PyObjectLookupSpecial */ -#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx__PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name, int with_error) { - PyObject *res; - PyTypeObject *tp = Py_TYPE(obj); -#if PY_MAJOR_VERSION < 3 - if (unlikely(PyInstance_Check(obj))) - return with_error ? __Pyx_PyObject_GetAttrStr(obj, attr_name) : __Pyx_PyObject_GetAttrStrNoError(obj, attr_name); -#endif - res = _PyType_Lookup(tp, attr_name); - if (likely(res)) { - descrgetfunc f = Py_TYPE(res)->tp_descr_get; - if (!f) { - Py_INCREF(res); - } else { - res = f(res, obj, (PyObject *)tp); - } - } else if (with_error) { - PyErr_SetObject(PyExc_AttributeError, attr_name); - } - return res; -} -#endif - -/* PyFloatBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyFloat_TrueDivideObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { - const double b = floatval; - double a, result; - (void)inplace; (void)zerodivision_check; - if (likely(PyFloat_CheckExact(op1))) { -#if CYTHON_COMPILING_IN_LIMITED_API - a = __pyx_PyFloat_AsDouble(op1); -#else - a = PyFloat_AS_DOUBLE(op1); -#endif - - } else - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - a = (double) PyInt_AS_LONG(op1); - - } else - #endif - if (likely(PyLong_CheckExact(op1))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - switch (size) { - case 0: a = 0.0; break; - case -1: a = -(double) digits[0]; break; - case 1: a = (double) digits[0]; break; - case -2: - case 2: - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { - a = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -2) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - case -3: - case 3: - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { - a = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -3) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - case -4: - case 4: - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) 
< 53) || (3 * PyLong_SHIFT < 53))) { - a = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -4) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - default: - #else - { - #endif - a = PyLong_AsDouble(op1); - if (unlikely(a == -1.0 && PyErr_Occurred())) return NULL; - } - } else { - return (inplace ? PyNumber_InPlaceTrueDivide : PyNumber_TrueDivide)(op1, op2); - } - PyFPE_START_PROTECT("divide", return NULL) - result = a / b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); -} -#endif - -/* PyIntBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_TrueDivideObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long a = PyInt_AS_LONG(op1); - - if (8 * sizeof(long) <= 53 || likely(labs(a) <= ((PY_LONG_LONG)1 << 53))) { - return PyFloat_FromDouble((double)a / (double)b); - } - return PyInt_Type.tp_as_number->nb_true_divide(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (unlikely(size == 0)) { - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT && 1 * PyLong_SHIFT < 53) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT && 1 * PyLong_SHIFT < 53) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT && 2 * PyLong_SHIFT < 53) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT && 2 * PyLong_SHIFT < 53) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT && 3 * PyLong_SHIFT < 53) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT && 3 * PyLong_SHIFT < 53) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_true_divide(op1, op2); - } - } - if ((8 * sizeof(long) <= 53 || likely(labs(a) <= ((PY_LONG_LONG)1 << 53))) - || __Pyx_sst_abs(size) <= 52 / PyLong_SHIFT) { - return PyFloat_FromDouble((double)a / (double)b); 
- } - return PyLong_Type.tp_as_number->nb_true_divide(op1, op2); - return PyLong_FromLong(x); - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double a = __pyx_PyFloat_AsDouble(op1); -#else - double a = PyFloat_AS_DOUBLE(op1); -#endif - double result; - - PyFPE_START_PROTECT("divide", return NULL) - result = ((double)a) / (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceTrueDivide : PyNumber_TrueDivide)(op1, op2); -} -#endif - -/* PyIntBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_SubtractCObj(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op2))) { - const long a = intval; - long x; - long b = PyInt_AS_LONG(op2); - - x = (long)((unsigned long)a - b); - if (likely((x^a) >= 0 || (x^~b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_subtract(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op2))) { - const long a = intval; - long b, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG lla = intval; - PY_LONG_LONG llb, llx; -#endif - const digit* digits = ((PyLongObject*)op2)->ob_digit; - const Py_ssize_t size = Py_SIZE(op2); - if (unlikely(size == 0)) { - return __Pyx_NewRef(op1); - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - b = likely(size) ? digits[0] : 0; - if (size == -1) b = -b; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - b = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - llb = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - b = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - llb = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - b = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - llb = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - b = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - llb = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) 
- 1 > 4 * PyLong_SHIFT) { - b = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - llb = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - b = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - llb = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); - } - } - x = a - b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla - llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op2)) { - const long a = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double b = __pyx_PyFloat_AsDouble(op2); -#else - double b = PyFloat_AS_DOUBLE(op2); -#endif - double result; - - PyFPE_START_PROTECT("subtract", return NULL) - result = ((double)a) - (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); -} -#endif - -/* DictGetItem */ - #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY -static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { - PyObject *value; - value = PyDict_GetItemWithError(d, key); - if (unlikely(!value)) { - if (!PyErr_Occurred()) { - if (unlikely(PyTuple_Check(key))) { - PyObject* args = PyTuple_Pack(1, key); - if (likely(args)) { - PyErr_SetObject(PyExc_KeyError, args); - Py_DECREF(args); - } - } else { - PyErr_SetObject(PyExc_KeyError, key); - } - } - return NULL; - } - Py_INCREF(value); - return value; -} -#endif - -/* PyIntBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - - x = (long)((unsigned long)a - b); - if (likely((x^a) >= 0 || (x^~b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_subtract(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (unlikely(size == 0)) { - return PyLong_FromLong(-intval); - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); - } - } - x = a - b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla - llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double a = __pyx_PyFloat_AsDouble(op1); 
-#else - double a = PyFloat_AS_DOUBLE(op1); -#endif - double result; - - PyFPE_START_PROTECT("subtract", return NULL) - result = ((double)a) - (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); -} -#endif - -/* RaiseException */ - #if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - __Pyx_PyThreadState_declare - CYTHON_UNUSED_VAR(cause); - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; - } - } - if (PyType_Check(type)) { -#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - 
Py_INCREF(fixed_cause); - } else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#else - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* PyIntBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - - x = (long)((unsigned long)a + b); - if (likely((x^a) >= 0 || (x^b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (unlikely(size == 0)) { - return __Pyx_NewRef(op2); - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << 
PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - } - x = a + b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla + llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double a = __pyx_PyFloat_AsDouble(op1); -#else - double a = PyFloat_AS_DOUBLE(op1); -#endif - double result; - - PyFPE_START_PROTECT("add", return NULL) - result = ((double)a) + (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); -} -#endif - -/* SliceObject */ - static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, - Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, - int has_cstart, int has_cstop, int wraparound) { - __Pyx_TypeName obj_type_name; -#if CYTHON_USE_TYPE_SLOTS - PyMappingMethods* mp; -#if PY_MAJOR_VERSION < 3 - PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; - if (likely(ms && ms->sq_slice)) { - if (!has_cstart) { - if (_py_start && (*_py_start != Py_None)) { - cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); - if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstart = 0; - } - if (!has_cstop) { - if (_py_stop && (*_py_stop != Py_None)) { - cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); - if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstop = PY_SSIZE_T_MAX; - } - if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { - Py_ssize_t l = ms->sq_length(obj); - if (likely(l >= 0)) { - if (cstop < 0) { - cstop += l; - if (cstop < 0) cstop = 0; - } - if (cstart < 0) { - cstart += l; - if (cstart < 0) cstart = 0; - } - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - goto bad; - PyErr_Clear(); - } - } - return ms->sq_slice(obj, cstart, cstop); - } -#else - CYTHON_UNUSED_VAR(wraparound); -#endif - mp = Py_TYPE(obj)->tp_as_mapping; - if (likely(mp && mp->mp_subscript)) -#else - CYTHON_UNUSED_VAR(wraparound); -#endif - { - PyObject* result; - PyObject *py_slice, *py_start, *py_stop; - if (_py_slice) { - py_slice = *_py_slice; - } else { - PyObject* owned_start = NULL; - PyObject* owned_stop = NULL; - if (_py_start) { - py_start = *_py_start; - } else { - if (has_cstart) { - owned_start = py_start = PyInt_FromSsize_t(cstart); - if (unlikely(!py_start)) goto bad; - } else - py_start = Py_None; - } - if (_py_stop) { - py_stop = *_py_stop; - } else { - if (has_cstop) { - owned_stop = py_stop = PyInt_FromSsize_t(cstop); - if (unlikely(!py_stop)) { - Py_XDECREF(owned_start); - goto bad; - } - } else - py_stop = Py_None; - } - py_slice = PySlice_New(py_start, py_stop, Py_None); - Py_XDECREF(owned_start); - Py_XDECREF(owned_stop); - if (unlikely(!py_slice)) goto bad; - } -#if CYTHON_USE_TYPE_SLOTS - result = mp->mp_subscript(obj, py_slice); -#else - result = PyObject_GetItem(obj, py_slice); -#endif - if (!_py_slice) { - Py_DECREF(py_slice); - } - return result; - } - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "'" __Pyx_FMT_TYPENAME "' object is unsliceable", obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); -bad: - return NULL; -} - -/* ValidateBasesTuple */ - #if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS -static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases) { - Py_ssize_t i, n = PyTuple_GET_SIZE(bases); - for (i = 1; i < n; i++) - { - PyObject *b0 = PyTuple_GET_ITEM(bases, i); - PyTypeObject *b; -#if PY_MAJOR_VERSION < 3 - if (PyClass_Check(b0)) - { - PyErr_Format(PyExc_TypeError, "base class '%.200s' is an old-style class", - PyString_AS_STRING(((PyClassObject*)b0)->cl_name)); - return -1; - } -#endif - b = (PyTypeObject*) b0; - if (!__Pyx_PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE)) - { - __Pyx_TypeName b_name = __Pyx_PyType_GetName(b); - PyErr_Format(PyExc_TypeError, - "base class '" __Pyx_FMT_TYPENAME "' is not a heap type", b_name); - __Pyx_DECREF_TypeName(b_name); - 
return -1; - } - if (dictoffset == 0 && b->tp_dictoffset) - { - __Pyx_TypeName b_name = __Pyx_PyType_GetName(b); - PyErr_Format(PyExc_TypeError, - "extension type '%.200s' has no __dict__ slot, " - "but base type '" __Pyx_FMT_TYPENAME "' has: " - "either add 'cdef dict __dict__' to the extension type " - "or add '__slots__ = [...]' to the base type", - type_name, b_name); - __Pyx_DECREF_TypeName(b_name); - return -1; - } - } - return 0; -} -#endif - -/* PyType_Ready */ - static int __Pyx_PyType_Ready(PyTypeObject *t) { -#if CYTHON_USE_TYPE_SPECS || !(CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API) || defined(PYSTON_MAJOR_VERSION) - (void)__Pyx_PyObject_CallMethod0; -#if CYTHON_USE_TYPE_SPECS - (void)__Pyx_validate_bases_tuple; -#endif - return PyType_Ready(t); -#else - int r; - PyObject *bases = __Pyx_PyType_GetSlot(t, tp_bases, PyObject*); - if (bases && unlikely(__Pyx_validate_bases_tuple(t->tp_name, t->tp_dictoffset, bases) == -1)) - return -1; -#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION) - { - int gc_was_enabled; - #if PY_VERSION_HEX >= 0x030A00b1 - gc_was_enabled = PyGC_Disable(); - (void)__Pyx_PyObject_CallMethod0; - #else - PyObject *ret, *py_status; - PyObject *gc = NULL; - #if PY_VERSION_HEX >= 0x030700a1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM+0 >= 0x07030400) - gc = PyImport_GetModule(__pyx_kp_u_gc); - #endif - if (unlikely(!gc)) gc = PyImport_Import(__pyx_kp_u_gc); - if (unlikely(!gc)) return -1; - py_status = __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_isenabled); - if (unlikely(!py_status)) { - Py_DECREF(gc); - return -1; - } - gc_was_enabled = __Pyx_PyObject_IsTrue(py_status); - Py_DECREF(py_status); - if (gc_was_enabled > 0) { - ret = __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_disable); - if (unlikely(!ret)) { - Py_DECREF(gc); - return -1; - } - Py_DECREF(ret); - } else if (unlikely(gc_was_enabled == -1)) { - Py_DECREF(gc); - return -1; - } - #endif - t->tp_flags |= Py_TPFLAGS_HEAPTYPE; -#else - (void)__Pyx_PyObject_CallMethod0; -#endif - r = PyType_Ready(t); -#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION) - t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE; - #if PY_VERSION_HEX >= 0x030A00b1 - if (gc_was_enabled) - PyGC_Enable(); - #else - if (gc_was_enabled) { - PyObject *tp, *v, *tb; - PyErr_Fetch(&tp, &v, &tb); - ret = __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_enable); - if (likely(ret || r == -1)) { - Py_XDECREF(ret); - PyErr_Restore(tp, v, tb); - } else { - Py_XDECREF(tp); - Py_XDECREF(v); - Py_XDECREF(tb); - r = -1; - } - } - Py_DECREF(gc); - #endif - } -#endif - return r; -#endif -} - -/* PyObject_GenericGetAttrNoDict */ - #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { - __Pyx_TypeName type_name = __Pyx_PyType_GetName(tp); - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", - type_name, attr_name); -#else - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", - type_name, PyString_AS_STRING(attr_name)); -#endif - __Pyx_DECREF_TypeName(type_name); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { - PyObject *descr; - PyTypeObject *tp = Py_TYPE(obj); - if (unlikely(!PyString_Check(attr_name))) { - return PyObject_GenericGetAttr(obj, attr_name); - } - assert(!tp->tp_dictoffset); - descr = _PyType_Lookup(tp, attr_name); - if 
(unlikely(!descr)) { - return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); - } - Py_INCREF(descr); - #if PY_MAJOR_VERSION < 3 - if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) - #endif - { - descrgetfunc f = Py_TYPE(descr)->tp_descr_get; - if (unlikely(f)) { - PyObject *res = f(descr, obj, (PyObject *)tp); - Py_DECREF(descr); - return res; - } - } - return descr; -} -#endif - -/* ImportFrom */ - static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - const char* module_name_str = 0; - PyObject* module_name = 0; - PyObject* module_dot = 0; - PyObject* full_name = 0; - PyErr_Clear(); - module_name_str = PyModule_GetName(module); - if (unlikely(!module_name_str)) { goto modbad; } - module_name = PyUnicode_FromString(module_name_str); - if (unlikely(!module_name)) { goto modbad; } - module_dot = PyUnicode_Concat(module_name, __pyx_kp_u__10); - if (unlikely(!module_dot)) { goto modbad; } - full_name = PyUnicode_Concat(module_dot, name); - if (unlikely(!full_name)) { goto modbad; } - #if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) - { - PyObject *modules = PyImport_GetModuleDict(); - if (unlikely(!modules)) - goto modbad; - value = PyObject_GetItem(modules, full_name); - } - #else - value = PyImport_GetModule(full_name); - #endif - modbad: - Py_XDECREF(full_name); - Py_XDECREF(module_dot); - Py_XDECREF(module_name); - } - if (unlikely(!value)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - "cannot import name %S", name); - #endif - } - return value; -} - -/* CLineInTraceback */ - #ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStrNoError(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ - #if !CYTHON_COMPILING_IN_LIMITED_API -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} -#endif - -/* AddTraceback */ - #include "compile.h" -#include "frameobject.h" -#include "traceback.h" -#if CYTHON_COMPILING_IN_LIMITED_API -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - if (c_line) { - (void) __pyx_cfilenm; - c_line = __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line); - } - _PyTraceback_Add(funcname, filename, c_line ? 
-c_line : py_line); -} -#else -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = NULL; - PyObject *py_funcname = NULL; - #if PY_MAJOR_VERSION < 3 - PyObject *py_srcfile = NULL; - py_srcfile = PyString_FromString(filename); - if (!py_srcfile) goto bad; - #endif - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - if (!py_funcname) goto bad; - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - if (!py_funcname) goto bad; - funcname = PyUnicode_AsUTF8(py_funcname); - if (!funcname) goto bad; - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - if (!py_funcname) goto bad; - #endif - } - #if PY_MAJOR_VERSION < 3 - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - #else - py_code = PyCode_NewEmpty(filename, funcname, py_line); - #endif - Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline - return py_code; -bad: - Py_XDECREF(py_funcname); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_srcfile); - #endif - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} -#endif - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* FormatTypeName */ - #if CYTHON_COMPILING_IN_LIMITED_API -static __Pyx_TypeName -__Pyx_PyType_GetName(PyTypeObject* tp) -{ - PyObject *name = __Pyx_PyObject_GetAttrStr((PyObject *)tp, - __pyx_n_s_name_2); - if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) { - PyErr_Clear(); - Py_XSETREF(name, __Pyx_NewRef(__pyx_n_s__20)); - } - return name; -} -#endif - -/* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(long) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch 
(Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 2 * PyLong_SHIFT)) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(long) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - return 
(long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if ((sizeof(long) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if (CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if 
PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(int) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(int) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - return (int) ((((((int)digits[1]) 
<< PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if ((sizeof(int) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if (CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available, cannot convert large numbers"); -#else - int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* FastTypeChecks 
*/ - #if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*); - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (cls == a || cls == b) return 1; - mro = cls->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - PyObject *base = PyTuple_GET_ITEM(mro, i); - if (base == (PyObject *)a || base == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - if (exc_type1) { - return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2); - } else { - return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i -#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom) -static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *__pyx_tstate, PyObject **pvalue) { - PyObject *et, *ev, *tb; - PyObject *value = NULL; - CYTHON_UNUSED_VAR(__pyx_tstate); - __Pyx_ErrFetch(&et, &ev, &tb); - if (!et) { - Py_XDECREF(tb); - Py_XDECREF(ev); - Py_INCREF(Py_None); - *pvalue = Py_None; - return 0; - } - if (likely(et == PyExc_StopIteration)) { - if (!ev) { - Py_INCREF(Py_None); - value = Py_None; - } -#if PY_VERSION_HEX >= 0x030300A0 - else if (likely(__Pyx_IS_TYPE(ev, (PyTypeObject*)PyExc_StopIteration))) { - value = ((PyStopIterationObject *)ev)->value; - Py_INCREF(value); - Py_DECREF(ev); - } -#endif - else if (unlikely(PyTuple_Check(ev))) { - if (PyTuple_GET_SIZE(ev) >= 1) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - value = PyTuple_GET_ITEM(ev, 0); - Py_INCREF(value); -#else - value = PySequence_ITEM(ev, 0); -#endif - } else { - Py_INCREF(Py_None); - value = Py_None; - } - Py_DECREF(ev); - } - else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) { - value = ev; - } - if (likely(value)) { - Py_XDECREF(tb); - Py_DECREF(et); - *pvalue = value; - return 0; - } - } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) { - __Pyx_ErrRestore(et, ev, 
tb); - return -1; - } - PyErr_NormalizeException(&et, &ev, &tb); - if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) { - __Pyx_ErrRestore(et, ev, tb); - return -1; - } - Py_XDECREF(tb); - Py_DECREF(et); -#if PY_VERSION_HEX >= 0x030300A0 - value = ((PyStopIterationObject *)ev)->value; - Py_INCREF(value); - Py_DECREF(ev); -#else - { - PyObject* args = __Pyx_PyObject_GetAttrStr(ev, __pyx_n_s_args); - Py_DECREF(ev); - if (likely(args)) { - value = PySequence_GetItem(args, 0); - Py_DECREF(args); - } - if (unlikely(!value)) { - __Pyx_ErrRestore(NULL, NULL, NULL); - Py_INCREF(Py_None); - value = Py_None; - } - } -#endif - *pvalue = value; - return 0; -} -static CYTHON_INLINE -void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) { - PyObject *t, *v, *tb; - t = exc_state->exc_type; - v = exc_state->exc_value; - tb = exc_state->exc_traceback; - exc_state->exc_type = NULL; - exc_state->exc_value = NULL; - exc_state->exc_traceback = NULL; - Py_XDECREF(t); - Py_XDECREF(v); - Py_XDECREF(tb); -} -#define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL) -static void __Pyx__Coroutine_AlreadyRunningError(__pyx_CoroutineObject *gen) { - const char *msg; - CYTHON_MAYBE_UNUSED_VAR(gen); - if ((0)) { - #ifdef __Pyx_Coroutine_USED - } else if (__Pyx_Coroutine_Check((PyObject*)gen)) { - msg = "coroutine already executing"; - #endif - #ifdef __Pyx_AsyncGen_USED - } else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) { - msg = "async generator already executing"; - #endif - } else { - msg = "generator already executing"; - } - PyErr_SetString(PyExc_ValueError, msg); -} -#define __Pyx_Coroutine_NotStartedError(gen) (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL) -static void __Pyx__Coroutine_NotStartedError(PyObject *gen) { - const char *msg; - CYTHON_MAYBE_UNUSED_VAR(gen); - if ((0)) { - #ifdef __Pyx_Coroutine_USED - } else if (__Pyx_Coroutine_Check(gen)) { - msg = "can't send non-None value to a just-started coroutine"; - #endif - #ifdef __Pyx_AsyncGen_USED - } else if (__Pyx_AsyncGen_CheckExact(gen)) { - msg = "can't send non-None value to a just-started async generator"; - #endif - } else { - msg = "can't send non-None value to a just-started generator"; - } - PyErr_SetString(PyExc_TypeError, msg); -} -#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL) -static void __Pyx__Coroutine_AlreadyTerminatedError(PyObject *gen, PyObject *value, int closing) { - CYTHON_MAYBE_UNUSED_VAR(gen); - CYTHON_MAYBE_UNUSED_VAR(closing); - #ifdef __Pyx_Coroutine_USED - if (!closing && __Pyx_Coroutine_Check(gen)) { - PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine"); - } else - #endif - if (value) { - #ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(gen)) - PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration); - else - #endif - PyErr_SetNone(PyExc_StopIteration); - } -} -static -PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) { - __Pyx_PyThreadState_declare - PyThreadState *tstate; - __Pyx_ExcInfoStruct *exc_state; - PyObject *retval; - assert(!self->is_running); - if (unlikely(self->resume_label == 0)) { - if (unlikely(value && value != Py_None)) { - return __Pyx_Coroutine_NotStartedError((PyObject*)self); - } - } - if (unlikely(self->resume_label == -1)) { - return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing); - } -#if 
CYTHON_FAST_THREAD_STATE - __Pyx_PyThreadState_assign - tstate = __pyx_tstate; -#else - tstate = __Pyx_PyThreadState_Current; -#endif - exc_state = &self->gi_exc_state; - if (exc_state->exc_type) { - #if CYTHON_COMPILING_IN_PYPY - #else - if (exc_state->exc_traceback) { - PyTracebackObject *tb = (PyTracebackObject *) exc_state->exc_traceback; - PyFrameObject *f = tb->tb_frame; - assert(f->f_back == NULL); - #if PY_VERSION_HEX >= 0x030B00A1 - f->f_back = PyThreadState_GetFrame(tstate); - #else - Py_XINCREF(tstate->frame); - f->f_back = tstate->frame; - #endif - } - #endif - } -#if CYTHON_USE_EXC_INFO_STACK - exc_state->previous_item = tstate->exc_info; - tstate->exc_info = exc_state; -#else - if (exc_state->exc_type) { - __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); - } else { - __Pyx_Coroutine_ExceptionClear(exc_state); - __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback); - } -#endif - self->is_running = 1; - retval = self->body(self, tstate, value); - self->is_running = 0; -#if CYTHON_USE_EXC_INFO_STACK - exc_state = &self->gi_exc_state; - tstate->exc_info = exc_state->previous_item; - exc_state->previous_item = NULL; - __Pyx_Coroutine_ResetFrameBackpointer(exc_state); -#endif - return retval; -} -static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) { - PyObject *exc_tb = exc_state->exc_traceback; - if (likely(exc_tb)) { -#if CYTHON_COMPILING_IN_PYPY -#else - PyTracebackObject *tb = (PyTracebackObject *) exc_tb; - PyFrameObject *f = tb->tb_frame; - Py_CLEAR(f->f_back); -#endif - } -} -static CYTHON_INLINE -PyObject *__Pyx_Coroutine_MethodReturn(PyObject* gen, PyObject *retval) { - CYTHON_MAYBE_UNUSED_VAR(gen); - if (unlikely(!retval)) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (!__Pyx_PyErr_Occurred()) { - PyObject *exc = PyExc_StopIteration; - #ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(gen)) - exc = __Pyx_PyExc_StopAsyncIteration; - #endif - __Pyx_PyErr_SetNone(exc); - } - } - return retval; -} -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) -static CYTHON_INLINE -PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) { -#if PY_VERSION_HEX <= 0x030A00A1 - return _PyGen_Send(gen, arg); -#else - PyObject *result; - if (PyIter_Send((PyObject*)gen, arg ? 
arg : Py_None, &result) == PYGEN_RETURN) { - if (PyAsyncGen_CheckExact(gen)) { - assert(result == Py_None); - PyErr_SetNone(PyExc_StopAsyncIteration); - } - else if (result == Py_None) { - PyErr_SetNone(PyExc_StopIteration); - } - else { - _PyGen_SetStopIterationValue(result); - } - Py_CLEAR(result); - } - return result; -#endif -} -#endif -static CYTHON_INLINE -PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) { - PyObject *ret; - PyObject *val = NULL; - __Pyx_Coroutine_Undelegate(gen); - __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val); - ret = __Pyx_Coroutine_SendEx(gen, val, 0); - Py_XDECREF(val); - return ret; -} -static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) { - PyObject *retval; - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; - PyObject *yf = gen->yieldfrom; - if (unlikely(gen->is_running)) - return __Pyx_Coroutine_AlreadyRunningError(gen); - if (yf) { - PyObject *ret; - gen->is_running = 1; - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - ret = __Pyx_Coroutine_Send(yf, value); - } else - #endif - #ifdef __Pyx_Coroutine_USED - if (__Pyx_Coroutine_Check(yf)) { - ret = __Pyx_Coroutine_Send(yf, value); - } else - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_PyAsyncGenASend_CheckExact(yf)) { - ret = __Pyx_async_gen_asend_send(yf, value); - } else - #endif - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) - if (PyGen_CheckExact(yf)) { - ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value); - } else - #endif - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03050000 && defined(PyCoro_CheckExact) && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) - if (PyCoro_CheckExact(yf)) { - ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? 
NULL : value); - } else - #endif - { - if (value == Py_None) - ret = __Pyx_PyObject_GetIterNextFunc(yf)(yf); - else - ret = __Pyx_PyObject_CallMethod1(yf, __pyx_n_s_send, value); - } - gen->is_running = 0; - if (likely(ret)) { - return ret; - } - retval = __Pyx_Coroutine_FinishDelegation(gen); - } else { - retval = __Pyx_Coroutine_SendEx(gen, value, 0); - } - return __Pyx_Coroutine_MethodReturn(self, retval); -} -static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) { - PyObject *retval = NULL; - int err = 0; - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - retval = __Pyx_Coroutine_Close(yf); - if (!retval) - return -1; - } else - #endif - #ifdef __Pyx_Coroutine_USED - if (__Pyx_Coroutine_Check(yf)) { - retval = __Pyx_Coroutine_Close(yf); - if (!retval) - return -1; - } else - if (__Pyx_CoroutineAwait_CheckExact(yf)) { - retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL); - if (!retval) - return -1; - } else - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_PyAsyncGenASend_CheckExact(yf)) { - retval = __Pyx_async_gen_asend_close(yf, NULL); - } else - if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) { - retval = __Pyx_async_gen_athrow_close(yf, NULL); - } else - #endif - { - PyObject *meth; - gen->is_running = 1; - meth = __Pyx_PyObject_GetAttrStrNoError(yf, __pyx_n_s_close); - if (unlikely(!meth)) { - if (unlikely(PyErr_Occurred())) { - PyErr_WriteUnraisable(yf); - } - } else { - retval = __Pyx_PyObject_CallNoArg(meth); - Py_DECREF(meth); - if (unlikely(!retval)) - err = -1; - } - gen->is_running = 0; - } - Py_XDECREF(retval); - return err; -} -static PyObject *__Pyx_Generator_Next(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self; - PyObject *yf = gen->yieldfrom; - if (unlikely(gen->is_running)) - return __Pyx_Coroutine_AlreadyRunningError(gen); - if (yf) { - PyObject *ret; - gen->is_running = 1; - #ifdef __Pyx_Generator_USED - if (__Pyx_Generator_CheckExact(yf)) { - ret = __Pyx_Generator_Next(yf); - } else - #endif - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3) - if (PyGen_CheckExact(yf)) { - ret = __Pyx_PyGen_Send((PyGenObject*)yf, NULL); - } else - #endif - #ifdef __Pyx_Coroutine_USED - if (__Pyx_Coroutine_Check(yf)) { - ret = __Pyx_Coroutine_Send(yf, Py_None); - } else - #endif - ret = __Pyx_PyObject_GetIterNextFunc(yf)(yf); - gen->is_running = 0; - if (likely(ret)) { - return ret; - } - return __Pyx_Coroutine_FinishDelegation(gen); - } - return __Pyx_Coroutine_SendEx(gen, Py_None, 0); -} -static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, PyObject *arg) { - CYTHON_UNUSED_VAR(arg); - return __Pyx_Coroutine_Close(self); -} -static PyObject *__Pyx_Coroutine_Close(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - PyObject *retval, *raised_exception; - PyObject *yf = gen->yieldfrom; - int err = 0; - if (unlikely(gen->is_running)) - return __Pyx_Coroutine_AlreadyRunningError(gen); - if (yf) { - Py_INCREF(yf); - err = __Pyx_Coroutine_CloseIter(gen, yf); - __Pyx_Coroutine_Undelegate(gen); - Py_DECREF(yf); - } - if (err == 0) - PyErr_SetNone(PyExc_GeneratorExit); - retval = __Pyx_Coroutine_SendEx(gen, NULL, 1); - if (unlikely(retval)) { - const char *msg; - Py_DECREF(retval); - if ((0)) { - #ifdef __Pyx_Coroutine_USED - } else if (__Pyx_Coroutine_Check(self)) { - msg = "coroutine ignored GeneratorExit"; - #endif - #ifdef __Pyx_AsyncGen_USED - } else if 
(__Pyx_AsyncGen_CheckExact(self)) { -#if PY_VERSION_HEX < 0x03060000 - msg = "async generator ignored GeneratorExit - might require Python 3.6+ finalisation (PEP 525)"; -#else - msg = "async generator ignored GeneratorExit"; -#endif - #endif - } else { - msg = "generator ignored GeneratorExit"; - } - PyErr_SetString(PyExc_RuntimeError, msg); - return NULL; - } - raised_exception = PyErr_Occurred(); - if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) { - if (raised_exception) PyErr_Clear(); - Py_INCREF(Py_None); - return Py_None; - } - return NULL; -} -static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb, - PyObject *args, int close_on_genexit) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - PyObject *yf = gen->yieldfrom; - if (unlikely(gen->is_running)) - return __Pyx_Coroutine_AlreadyRunningError(gen); - if (yf) { - PyObject *ret; - Py_INCREF(yf); - if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) { - int err = __Pyx_Coroutine_CloseIter(gen, yf); - Py_DECREF(yf); - __Pyx_Coroutine_Undelegate(gen); - if (err < 0) - return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); - goto throw_here; - } - gen->is_running = 1; - if (0 - #ifdef __Pyx_Generator_USED - || __Pyx_Generator_CheckExact(yf) - #endif - #ifdef __Pyx_Coroutine_USED - || __Pyx_Coroutine_Check(yf) - #endif - ) { - ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit); - #ifdef __Pyx_Coroutine_USED - } else if (__Pyx_CoroutineAwait_CheckExact(yf)) { - ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit); - #endif - } else { - PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(yf, __pyx_n_s_throw); - if (unlikely(!meth)) { - Py_DECREF(yf); - if (unlikely(PyErr_Occurred())) { - gen->is_running = 0; - return NULL; - } - __Pyx_Coroutine_Undelegate(gen); - gen->is_running = 0; - goto throw_here; - } - if (likely(args)) { - ret = __Pyx_PyObject_Call(meth, args, NULL); - } else { - PyObject *cargs[4] = {NULL, typ, val, tb}; - ret = __Pyx_PyObject_FastCall(meth, cargs+1, 3 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); - } - Py_DECREF(meth); - } - gen->is_running = 0; - Py_DECREF(yf); - if (!ret) { - ret = __Pyx_Coroutine_FinishDelegation(gen); - } - return __Pyx_Coroutine_MethodReturn(self, ret); - } -throw_here: - __Pyx_Raise(typ, val, tb, NULL); - return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0)); -} -static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) { - PyObject *typ; - PyObject *val = NULL; - PyObject *tb = NULL; - if (unlikely(!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb))) - return NULL; - return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1); -} -static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) { - Py_VISIT(exc_state->exc_type); - Py_VISIT(exc_state->exc_value); - Py_VISIT(exc_state->exc_traceback); - return 0; -} -static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) { - Py_VISIT(gen->closure); - Py_VISIT(gen->classobj); - Py_VISIT(gen->yieldfrom); - return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg); -} -static int __Pyx_Coroutine_clear(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - Py_CLEAR(gen->closure); - 
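/* descriptive note (added): this is the GC tp_clear hook — it drops every reference the generator owns so reference cycles through it can be collected */ -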
Py_CLEAR(gen->classobj); - Py_CLEAR(gen->yieldfrom); - __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state); -#ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(self)) { - Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer); - } -#endif - Py_CLEAR(gen->gi_code); - Py_CLEAR(gen->gi_frame); - Py_CLEAR(gen->gi_name); - Py_CLEAR(gen->gi_qualname); - Py_CLEAR(gen->gi_modulename); - return 0; -} -static void __Pyx_Coroutine_dealloc(PyObject *self) { - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - PyObject_GC_UnTrack(gen); - if (gen->gi_weakreflist != NULL) - PyObject_ClearWeakRefs(self); - if (gen->resume_label >= 0) { - PyObject_GC_Track(self); -#if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE - if (unlikely(PyObject_CallFinalizerFromDealloc(self))) -#else - Py_TYPE(gen)->tp_del(self); - if (unlikely(Py_REFCNT(self) > 0)) -#endif - { - return; - } - PyObject_GC_UnTrack(self); - } -#ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(self)) { - /* We have to handle this case for asynchronous generators - right here, because this code has to be between UNTRACK - and GC_Del. */ - Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer); - } -#endif - __Pyx_Coroutine_clear(self); - __Pyx_PyHeapTypeObject_GC_Del(gen); -} -static void __Pyx_Coroutine_del(PyObject *self) { - PyObject *error_type, *error_value, *error_traceback; - __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self; - __Pyx_PyThreadState_declare - if (gen->resume_label < 0) { - return; - } -#if !CYTHON_USE_TP_FINALIZE - assert(self->ob_refcnt == 0); - __Pyx_SET_REFCNT(self, 1); -#endif - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&error_type, &error_value, &error_traceback); -#ifdef __Pyx_AsyncGen_USED - if (__Pyx_AsyncGen_CheckExact(self)) { - __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self; - PyObject *finalizer = agen->ag_finalizer; - if (finalizer && !agen->ag_closed) { - PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self); - if (unlikely(!res)) { - PyErr_WriteUnraisable(self); - } else { - Py_DECREF(res); - } - __Pyx_ErrRestore(error_type, error_value, error_traceback); - return; - } - } -#endif - if (unlikely(gen->resume_label == 0 && !error_value)) { -#ifdef __Pyx_Coroutine_USED -#ifdef __Pyx_Generator_USED - if (!__Pyx_Generator_CheckExact(self)) -#endif - { - PyObject_GC_UnTrack(self); -#if PY_MAJOR_VERSION >= 3 || defined(PyErr_WarnFormat) - if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0)) - PyErr_WriteUnraisable(self); -#else - {PyObject *msg; - char *cmsg; - #if CYTHON_COMPILING_IN_PYPY - msg = NULL; - cmsg = (char*) "coroutine was never awaited"; - #else - char *cname; - PyObject *qualname; - qualname = gen->gi_qualname; - cname = PyString_AS_STRING(qualname); - msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname); - if (unlikely(!msg)) { - PyErr_Clear(); - cmsg = (char*) "coroutine was never awaited"; - } else { - cmsg = PyString_AS_STRING(msg); - } - #endif - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, cmsg, 1) < 0)) - PyErr_WriteUnraisable(self); - Py_XDECREF(msg);} -#endif - PyObject_GC_Track(self); - } -#endif - } else { - PyObject *res = __Pyx_Coroutine_Close(self); - if (unlikely(!res)) { - if (PyErr_Occurred()) - PyErr_WriteUnraisable(self); - } else { - Py_DECREF(res); - } - } - __Pyx_ErrRestore(error_type, error_value, error_traceback); -#if !CYTHON_USE_TP_FINALIZE - assert(Py_REFCNT(self) > 0); - if (likely(--self->ob_refcnt == 0)) { - return; - } - { - 
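/* descriptive note (added): the generator was resurrected during close(); redo the allocation bookkeeping via _Py_NewReference, then restore the surviving refcount */ -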
Py_ssize_t refcnt = Py_REFCNT(self); - _Py_NewReference(self); - __Pyx_SET_REFCNT(self, refcnt); - } -#if CYTHON_COMPILING_IN_CPYTHON - assert(PyType_IS_GC(Py_TYPE(self)) && - _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED); - _Py_DEC_REFTOTAL; -#endif -#ifdef COUNT_ALLOCS - --Py_TYPE(self)->tp_frees; - --Py_TYPE(self)->tp_allocs; -#endif -#endif -} -static PyObject * -__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, void *context) -{ - PyObject *name = self->gi_name; - CYTHON_UNUSED_VAR(context); - if (unlikely(!name)) name = Py_None; - Py_INCREF(name); - return name; -} -static int -__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__name__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(self->gi_name, value); - return 0; -} -static PyObject * -__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, void *context) -{ - PyObject *name = self->gi_qualname; - CYTHON_UNUSED_VAR(context); - if (unlikely(!name)) name = Py_None; - Py_INCREF(name); - return name; -} -static int -__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__qualname__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(self->gi_qualname, value); - return 0; -} -static PyObject * -__Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self, void *context) -{ - PyObject *frame = self->gi_frame; - CYTHON_UNUSED_VAR(context); - if (!frame) { - if (unlikely(!self->gi_code)) { - Py_RETURN_NONE; - } - frame = (PyObject *) PyFrame_New( - PyThreadState_Get(), /*PyThreadState *tstate,*/ - (PyCodeObject*) self->gi_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (unlikely(!frame)) - return NULL; - self->gi_frame = frame; - } - Py_INCREF(frame); - return frame; -} -static __pyx_CoroutineObject *__Pyx__Coroutine_New( - PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name) { - __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type); - if (unlikely(!gen)) - return NULL; - return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name); -} -static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit( - __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure, - PyObject *name, PyObject *qualname, PyObject *module_name) { - gen->body = body; - gen->closure = closure; - Py_XINCREF(closure); - gen->is_running = 0; - gen->resume_label = 0; - gen->classobj = NULL; - gen->yieldfrom = NULL; - gen->gi_exc_state.exc_type = NULL; - gen->gi_exc_state.exc_value = NULL; - gen->gi_exc_state.exc_traceback = NULL; -#if CYTHON_USE_EXC_INFO_STACK - gen->gi_exc_state.previous_item = NULL; -#endif - gen->gi_weakreflist = NULL; - Py_XINCREF(qualname); - gen->gi_qualname = qualname; - Py_XINCREF(name); - gen->gi_name = name; - Py_XINCREF(module_name); - gen->gi_modulename = module_name; - Py_XINCREF(code); - gen->gi_code = code; - 
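/* descriptive note (added): gi_frame starts out NULL; __Pyx_Coroutine_get_frame above creates a frame lazily and caches it on first access */ -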
gen->gi_frame = NULL; - PyObject_GC_Track(gen); - return gen; -} - -/* PatchModuleWithCoroutine */ - static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) { -#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - int result; - PyObject *globals, *result_obj; - globals = PyDict_New(); if (unlikely(!globals)) goto ignore; - result = PyDict_SetItemString(globals, "_cython_coroutine_type", - #ifdef __Pyx_Coroutine_USED - (PyObject*)__pyx_CoroutineType); - #else - Py_None); - #endif - if (unlikely(result < 0)) goto ignore; - result = PyDict_SetItemString(globals, "_cython_generator_type", - #ifdef __Pyx_Generator_USED - (PyObject*)__pyx_GeneratorType); - #else - Py_None); - #endif - if (unlikely(result < 0)) goto ignore; - if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore; - if (unlikely(PyDict_SetItemString(globals, "__builtins__", __pyx_b) < 0)) goto ignore; - result_obj = PyRun_String(py_code, Py_file_input, globals, globals); - if (unlikely(!result_obj)) goto ignore; - Py_DECREF(result_obj); - Py_DECREF(globals); - return module; -ignore: - Py_XDECREF(globals); - PyErr_WriteUnraisable(module); - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) { - Py_DECREF(module); - module = NULL; - } -#else - py_code++; -#endif - return module; -} - -/* PatchGeneratorABC */ - #ifndef CYTHON_REGISTER_ABCS -#define CYTHON_REGISTER_ABCS 1 -#endif -#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) -static PyObject* __Pyx_patch_abc_module(PyObject *module); -static PyObject* __Pyx_patch_abc_module(PyObject *module) { - module = __Pyx_Coroutine_patch_module( - module, "" -"if _cython_generator_type is not None:\n" -" try: Generator = _module.Generator\n" -" except AttributeError: pass\n" -" else: Generator.register(_cython_generator_type)\n" -"if _cython_coroutine_type is not None:\n" -" try: Coroutine = _module.Coroutine\n" -" except AttributeError: pass\n" -" else: Coroutine.register(_cython_coroutine_type)\n" - ); - return module; -} -#endif -static int __Pyx_patch_abc(void) { -#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - static int abc_patched = 0; - if (CYTHON_REGISTER_ABCS && !abc_patched) { - PyObject *module; - module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections"); - if (unlikely(!module)) { - PyErr_WriteUnraisable(NULL); - if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, - ((PY_MAJOR_VERSION >= 3) ? 
- "Cython module failed to register with collections.abc module" : - "Cython module failed to register with collections module"), 1) < 0)) { - return -1; - } - } else { - module = __Pyx_patch_abc_module(module); - abc_patched = 1; - if (unlikely(!module)) - return -1; - Py_DECREF(module); - } - module = PyImport_ImportModule("backports_abc"); - if (module) { - module = __Pyx_patch_abc_module(module); - Py_XDECREF(module); - } - if (!module) { - PyErr_Clear(); - } - } -#else - if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL); -#endif - return 0; -} - -/* Generator */ - static PyMethodDef __pyx_Generator_methods[] = { - {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O, - (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")}, - {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS, - (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")}, - {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS, - (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")}, - {0, 0, 0, 0} -}; -static PyMemberDef __pyx_Generator_memberlist[] = { - {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL}, - {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY, - (char*) PyDoc_STR("object being iterated by 'yield from', or None")}, - {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL}, - {(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), 0, 0}, -#if CYTHON_USE_TYPE_SPECS - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CoroutineObject, gi_weakreflist), READONLY, 0}, -#endif - {0, 0, 0, 0, 0} -}; -static PyGetSetDef __pyx_Generator_getsets[] = { - {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name, - (char*) PyDoc_STR("name of the generator"), 0}, - {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname, - (char*) PyDoc_STR("qualified name of the generator"), 0}, - {(char *) "gi_frame", (getter)__Pyx_Coroutine_get_frame, NULL, - (char*) PyDoc_STR("Frame of the generator"), 0}, - {0, 0, 0, 0, 0} -}; -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_GeneratorType_slots[] = { - {Py_tp_dealloc, (void *)__Pyx_Coroutine_dealloc}, - {Py_tp_traverse, (void *)__Pyx_Coroutine_traverse}, - {Py_tp_iter, (void *)PyObject_SelfIter}, - {Py_tp_iternext, (void *)__Pyx_Generator_Next}, - {Py_tp_methods, (void *)__pyx_Generator_methods}, - {Py_tp_members, (void *)__pyx_Generator_memberlist}, - {Py_tp_getset, (void *)__pyx_Generator_getsets}, - {Py_tp_getattro, (void *) __Pyx_PyObject_GenericGetAttrNoDict}, -#if CYTHON_USE_TP_FINALIZE - {Py_tp_finalize, (void *)__Pyx_Coroutine_del}, -#endif - {0, 0}, -}; -static PyType_Spec __pyx_GeneratorType_spec = { - __PYX_TYPE_MODULE_PREFIX "generator", - sizeof(__pyx_CoroutineObject), - 0, - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, - __pyx_GeneratorType_slots -}; -#else -static PyTypeObject __pyx_GeneratorType_type = { - PyVarObject_HEAD_INIT(0, 0) - __PYX_TYPE_MODULE_PREFIX "generator", - sizeof(__pyx_CoroutineObject), - 0, - (destructor) __Pyx_Coroutine_dealloc, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, - 0, - (traverseproc) __Pyx_Coroutine_traverse, - 0, - 0, - 
offsetof(__pyx_CoroutineObject, gi_weakreflist), - 0, - (iternextfunc) __Pyx_Generator_Next, - __pyx_Generator_methods, - __pyx_Generator_memberlist, - __pyx_Generator_getsets, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -#if CYTHON_USE_TP_FINALIZE - 0, -#else - __Pyx_Coroutine_del, -#endif - 0, -#if CYTHON_USE_TP_FINALIZE - __Pyx_Coroutine_del, -#elif PY_VERSION_HEX >= 0x030400a1 - 0, -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, -#endif -}; -#endif -static int __pyx_Generator_init(PyObject *module) { -#if CYTHON_USE_TYPE_SPECS - __pyx_GeneratorType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_GeneratorType_spec, NULL); -#else - (void) module; - __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict; - __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter; - __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type); -#endif - if (unlikely(!__pyx_GeneratorType)) { - return -1; - } - return 0; -} - -/* CheckBinaryVersion */ - static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compile time version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - return PyErr_WarnEx(NULL, message, 1); - } - return 0; -} - -/* InitStrings */ - #if PY_MAJOR_VERSION >= 3 -static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str) { - if (t.is_unicode | t.is_str) { - if (t.intern) { - *str = PyUnicode_InternFromString(t.s); - } else if (t.encoding) { - *str = PyUnicode_Decode(t.s, t.n - 1, t.encoding, NULL); - } else { - *str = PyUnicode_FromStringAndSize(t.s, t.n - 1); - } - } else { - *str = PyBytes_FromStringAndSize(t.s, t.n - 1); - } - if (!*str) - return -1; - if (PyObject_Hash(*str) == -1) - return -1; - return 0; -} -#endif -#if !CYTHON_COMPILING_IN_LIMITED_API -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION >= 3 - __Pyx_InitString(*t, t->p); - #else - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - #endif - ++t; - } - return 0; -} -#endif - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + 
PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } - } - } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY && !CYTHON_COMPILING_IN_LIMITED_API) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { - __Pyx_TypeName result_type_name = __Pyx_PyType_GetName(Py_TYPE(result)); -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type " __Pyx_FMT_TYPENAME "). 
" - "The ability to return an instance of a strict subclass of int is deprecated, " - "and may be removed in a future version of Python.", - result_type_name)) { - __Pyx_DECREF_TypeName(result_type_name); - Py_DECREF(result); - return NULL; - } - __Pyx_DECREF_TypeName(result_type_name); - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type " __Pyx_FMT_TYPENAME ")", - type_name, type_name, result_type_name); - __Pyx_DECREF_TypeName(result_type_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - ival = likely(size) ? 
digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { - if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { - return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); -#if PY_MAJOR_VERSION < 3 - } else if (likely(PyInt_CheckExact(o))) { - return PyInt_AS_LONG(o); -#endif - } else { - Py_ssize_t ival; - PyObject *x; - x = PyNumber_Index(o); - if (!x) return -1; - ival = PyInt_AsLong(x); - Py_DECREF(x); - return ival; - } -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -/* #### Code section: utility_code_pragmas_end ### */ -#if _MSV_VER -#pragma warning( pop ) -#endif - - - -/* #### Code section: end ### */ -#endif /* Py_PYTHON_H */ diff --git a/spaces/matthoffner/chatbot-mini/components/Chatbar/components/ClearConversations.tsx b/spaces/matthoffner/chatbot-mini/components/Chatbar/components/ClearConversations.tsx deleted file mode 100644 index 5f17aacc511b7f531dc9df14e9e24bbaaf6c7fe9..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot-mini/components/Chatbar/components/ClearConversations.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import { IconCheck, IconTrash, IconX } from '@tabler/icons-react'; -import { FC, useState } from 'react'; - -import { useTranslation } from 'next-i18next'; - - -interface Props { - onClearConversations: () => void; -} - -export const ClearConversations: FC = ({ onClearConversations }) => { - const [isConfirming, setIsConfirming] = useState(false); - - const { t } = useTranslation('sidebar'); - - const handleClearConversations = () => { - onClearConversations(); - setIsConfirming(false); - }; - - return isConfirming ? ( -
                <div> - <IconTrash /> - - <div> - {t('Are you sure?')} - </div>
                - - <div> - <IconCheck - onClick={(e) => { - e.stopPropagation(); - handleClearConversations(); - }} - />
                - - <IconX - onClick={(e) => { - e.stopPropagation(); - setIsConfirming(false); - }} - />
                - </div> - </div>
                - ) : ( - null - ); -}; diff --git a/spaces/menghanxia/ReversibleHalftoning/app.py b/spaces/menghanxia/ReversibleHalftoning/app.py deleted file mode 100644 index 28887afe53d78c7d2c11d57dcb8a41eec6bba7c2..0000000000000000000000000000000000000000 --- a/spaces/menghanxia/ReversibleHalftoning/app.py +++ /dev/null @@ -1,90 +0,0 @@ -import gradio as gr -import os, requests -import numpy as np -import torch.nn.functional as F -from model.model import ResHalf -from inference import Inferencer -from utils import util - -## local | remote -RUN_MODE = "remote" -if RUN_MODE != "local": - os.system("wget https://huggingface.co/menghanxia/ReversibleHalftoning/resolve/main/model_best.pth.tar") - os.rename("model_best.pth.tar", "./checkpoints/model_best.pth.tar") - ## examples - os.system("wget https://huggingface.co/menghanxia/ReversibleHalftoning/resolve/main/girl.png") - os.system("wget https://huggingface.co/menghanxia/ReversibleHalftoning/resolve/main/wave.png") - os.system("wget https://huggingface.co/menghanxia/ReversibleHalftoning/resolve/main/painting.png") - -## step 1: set up model -device = "cpu" -checkpt_path = "checkpoints/model_best.pth.tar" -invhalfer = Inferencer(checkpoint_path=checkpt_path, model=ResHalf(train=False), use_cuda=False, multi_gpu=False) - - -def prepare_data(input_img, decoding_only=False): - input_img = np.array(input_img / 255., np.float32) - if decoding_only: - input_img = input_img[:,:,:1] - input_img = util.img2tensor(input_img * 2. - 1.) - return input_img - - -def run_invhalf(invhalfer, input_img, decoding_only, device="cuda"): - input_img = prepare_data(input_img, decoding_only) - input_img = input_img.to(device) - if decoding_only: - print('>>>:restoration mode') - resColor = invhalfer(input_img, decoding_only=decoding_only) - output = util.tensor2img(resColor / 2. + 0.5) * 255. - else: - print('>>>:halftoning mode') - resHalftone, resColor = invhalfer(input_img, decoding_only=decoding_only) - output = util.tensor2img(resHalftone / 2. + 0.5) * 255. - return np.clip(output, 0, 255).astype(np.uint8) - - -def click_run(input_img, decoding_only): - output = run_invhalf(invhalfer, input_img, decoding_only, device) - return output - - -def click_move(output_img, decoding_only): - if decoding_only: - radio_status = "Halftoning (Photo2Halftone)" - else: - radio_status = "Restoration (Halftone2Photo)" - return output_img, radio_status, None - -## step 2: configure interface -demo = gr.Blocks(title="ReversibleHalftoning") -with demo: - gr.Markdown(value=""" - **Gradio demo for ReversibleHalftoning: Deep Halftoning with Reversible Binary Pattern**. Check our [github page](https://github.com/MenghanXia/ReversibleHalftoning) 😛. 
- """) - with gr.Row(): - with gr.Column(): - Image_input = gr.Image(type="numpy", label="Input", interactive=True).style(height=480) - with gr.Row(): - Radio_mode = gr.Radio(type="index", choices=["Halftoning (Photo2Halftone)", "Restoration (Halftone2Photo)"], \ - label="Choose a running mode", value="Halftoning (Photo2Halftone)") - Button_run = gr.Button(value="Run") - with gr.Column(): - Image_output = gr.Image(type="numpy", label="Output").style(height=480) - Button_move = gr.Button(value="Use it as input") - - Button_run.click(fn=click_run, inputs=[Image_input, Radio_mode], outputs=Image_output) - Button_move.click(fn=click_move, inputs=[Image_output, Radio_mode], outputs=[Image_input, Radio_mode, Image_output]) - - if RUN_MODE != "local": - gr.Examples(examples=[ - ['girl.png', "Halftoning (Photo2Halftone)"], - ['wave.png', "Halftoning (Photo2Halftone)"], - ['painting.png', "Restoration (Halftone2Photo)"], - ], - inputs=[Image_input,Radio_mode], outputs=[Image_output], label="Examples") - -if RUN_MODE == "local": - demo.launch(server_name='9.134.253.83',server_port=7788) -else: - demo.launch() \ No newline at end of file diff --git a/spaces/merle/PROTEIN_GENERATOR/utils/examples/loop_design.sh b/spaces/merle/PROTEIN_GENERATOR/utils/examples/loop_design.sh deleted file mode 100644 index de6c21db8ed2bd4ab526e7c41e8fb7bd13f76754..0000000000000000000000000000000000000000 --- a/spaces/merle/PROTEIN_GENERATOR/utils/examples/loop_design.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -#SBATCH -J seq_diff -#SBATCH -p gpu -#SBATCH --mem=8g -#SBATCH --gres=gpu:a6000:1 -#SBATCH -o ./out/slurm/slurm_%j.out - -source activate /software/conda/envs/SE3nv - -srun python ../inference.py \ - --num_designs 10 \ - --pdb pdbs/G12D_manual_mut.pdb \ - --out out/ab_loop \ - --contigs A2-176,0 C7-16,0 H2-95,12-15,H111-116,0 L1-45,10-12,L56-107 \ - --T 25 --save_best_plddt --loop_design diff --git a/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/py/model_bert_large_export.py b/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/py/model_bert_large_export.py deleted file mode 100644 index 4619908ff52a7f4a76c6ed7a66907cbdd3a7c819..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/server-side/fill-in-the-blank/py/model_bert_large_export.py +++ /dev/null @@ -1,19 +0,0 @@ -from transformers import (BertForMaskedLM, BertTokenizer) - -modelpath = 'bert-large-uncased-whole-word-masking' -model = BertForMaskedLM.from_pretrained(modelpath) - -model.save_pretrained('./bert-large-uncased-whole-word-masking') - - - - -# from transformers import (BertForMaskedLM, BertTokenizer) - -# modelpath = 'bert-large-uncased' -# model = BertForMaskedLM.from_pretrained(modelpath) - -# model.save_pretrained('./bert-large-uncased') - - - diff --git a/spaces/microsoft/visual_chatgpt/README.md b/spaces/microsoft/visual_chatgpt/README.md deleted file mode 100644 index d0618008e694979743945a6ca8a968509804fc62..0000000000000000000000000000000000000000 --- a/spaces/microsoft/visual_chatgpt/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Visual Chatgpt -emoji: 🎨 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: osl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mingyuan/ReMoDiffuse/mogen/models/attentions/base_attention.py b/spaces/mingyuan/ReMoDiffuse/mogen/models/attentions/base_attention.py deleted file mode 100644 index 
a78dccb1fd53836946d5bb0bdd1d2e8d70b0fbb3..0000000000000000000000000000000000000000 --- a/spaces/mingyuan/ReMoDiffuse/mogen/models/attentions/base_attention.py +++ /dev/null @@ -1,146 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from ..utils.stylization_block import StylizationBlock -from ..builder import ATTENTIONS - - -@ATTENTIONS.register_module() -class BaseMixedAttention(nn.Module): - - def __init__(self, latent_dim, - text_latent_dim, - num_heads, - dropout, - time_embed_dim): - super().__init__() - self.num_heads = num_heads - - self.norm = nn.LayerNorm(latent_dim) - self.text_norm = nn.LayerNorm(text_latent_dim) - - self.query = nn.Linear(latent_dim, latent_dim) - self.key_text = nn.Linear(text_latent_dim, latent_dim) - self.value_text = nn.Linear(text_latent_dim, latent_dim) - self.key_motion = nn.Linear(latent_dim, latent_dim) - self.value_motion = nn.Linear(latent_dim, latent_dim) - - self.dropout = nn.Dropout(dropout) - self.proj_out = StylizationBlock(latent_dim, time_embed_dim, dropout) - - def forward(self, x, xf, emb, src_mask, cond_type, **kwargs): - """ - x: B, T, D - xf: B, N, L - """ - B, T, D = x.shape - N = xf.shape[1] + x.shape[1] - H = self.num_heads - # B, T, D - query = self.query(self.norm(x)).view(B, T, H, -1) - # B, N, D - text_cond_type = ((cond_type % 10) > 0).float().view(B, 1, 1).repeat(1, xf.shape[1], 1) - key = torch.cat(( - self.key_text(self.text_norm(xf)), - self.key_motion(self.norm(x)) - ), dim=1).view(B, N, H, -1) - - attention = torch.einsum('bnhl,bmhl->bnmh', query, key) - motion_mask = src_mask.view(B, 1, T, 1) - text_mask = text_cond_type.view(B, 1, -1, 1) - mask = torch.cat((text_mask, motion_mask), dim=2) - attention = attention + (1 - mask) * -1000000 - attention = F.softmax(attention, dim=2) - - value = torch.cat(( - self.value_text(self.text_norm(xf)) * text_cond_type, - self.value_motion(self.norm(x)) * src_mask, - ), dim=1).view(B, N, H, -1) - - y = torch.einsum('bnmh,bmhl->bnhl', attention, value).reshape(B, T, D) - y = x + self.proj_out(y, emb) - return y - - -@ATTENTIONS.register_module() -class BaseSelfAttention(nn.Module): - - def __init__(self, latent_dim, - num_heads, - dropout, - time_embed_dim): - super().__init__() - self.num_heads = num_heads - - self.norm = nn.LayerNorm(latent_dim) - self.query = nn.Linear(latent_dim, latent_dim) - self.key = nn.Linear(latent_dim, latent_dim) - self.value = nn.Linear(latent_dim, latent_dim) - - self.dropout = nn.Dropout(dropout) - self.proj_out = StylizationBlock(latent_dim, time_embed_dim, dropout) - - def forward(self, x, emb, src_mask, **kwargs): - """ - x: B, T, D - """ - B, T, D = x.shape - H = self.num_heads - # B, T, D - query = self.query(self.norm(x)).view(B, T, H, -1) - # B, N, D - key = self.key(self.norm(x)).view(B, T, H, -1) - - attention = torch.einsum('bnhl,bmhl->bnmh', query, key) - mask = src_mask.view(B, 1, T, 1) - attention = attention + (1 - mask) * -1000000 - attention = F.softmax(attention, dim=2) - value = (self.value(self.norm(x)) * src_mask).view(B, T, H, -1) - y = torch.einsum('bnmh,bmhl->bnhl', attention, value).reshape(B, T, D) - y = x + self.proj_out(y, emb) - return y - - -@ATTENTIONS.register_module() -class BaseCrossAttention(nn.Module): - - def __init__(self, latent_dim, - text_latent_dim, - num_heads, - dropout, - time_embed_dim): - super().__init__() - self.num_heads = num_heads - - self.norm = nn.LayerNorm(latent_dim) - self.text_norm = nn.LayerNorm(text_latent_dim) - - self.query = nn.Linear(latent_dim, latent_dim) - 
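# note (added): the query is projected from the motion tokens x, while the key/value projections below consume the text features xf -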
self.key = nn.Linear(text_latent_dim, latent_dim) - self.value = nn.Linear(text_latent_dim, latent_dim) - - self.dropout = nn.Dropout(dropout) - self.proj_out = StylizationBlock(latent_dim, time_embed_dim, dropout) - - def forward(self, x, xf, emb, src_mask, cond_type, **kwargs): - """ - x: B, T, D - xf: B, N, L - """ - B, T, D = x.shape - N = xf.shape[1] - H = self.num_heads - # B, T, D - query = self.query(self.norm(x)).view(B, T, H, -1) - # B, N, D - text_cond_type = ((cond_type % 10) > 0).float().view(B, 1, 1).repeat(1, xf.shape[1], 1) - key = self.key(self.text_norm(xf)).view(B, N, H, -1) - attention = torch.einsum('bnhl,bmhl->bnmh', query, key) - mask = text_cond_type.view(B, 1, -1, 1) - attention = attention + (1 - mask) * -1000000 - attention = F.softmax(attention, dim=2) - - value = (self.value(self.text_norm(xf)) * text_cond_type).view(B, N, H, -1) - y = torch.einsum('bnmh,bmhl->bnhl', attention, value).reshape(B, T, D) - y = x + self.proj_out(y, emb) - return y diff --git a/spaces/mithril-security/blind_chat/src/lib/utils/randomUuid.ts b/spaces/mithril-security/blind_chat/src/lib/utils/randomUuid.ts deleted file mode 100644 index 9d536365c57659305ad28d6fc06b89d77ab337ab..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/src/lib/utils/randomUuid.ts +++ /dev/null @@ -1,14 +0,0 @@ -type UUID = ReturnType; - -export function randomUUID(): UUID { - // Only on old safari / ios - if (!("randomUUID" in crypto)) { - return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) => - ( - Number(c) ^ - (crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (Number(c) / 4))) - ).toString(16) - ) as UUID; - } - return crypto.randomUUID(); -} diff --git a/spaces/ml6team/post-processing-summarization/custom_renderer.py b/spaces/ml6team/post-processing-summarization/custom_renderer.py deleted file mode 100644 index 2610ce8df612751f8d007da7d52e51dcaf474550..0000000000000000000000000000000000000000 --- a/spaces/ml6team/post-processing-summarization/custom_renderer.py +++ /dev/null @@ -1,133 +0,0 @@ -from typing import Dict -from PIL import ImageFont - -TPL_DEP_WORDS = """ - - {text} - {tag} - -""" - -TPL_DEP_SVG = """ -{content} -""" - -TPL_DEP_ARCS = """ - - - - {label} - - - -""" - - -def get_pil_text_size(text, font_size, font_name): - font = ImageFont.truetype(font_name, font_size) - size = font.getsize(text) - return size - - -def render_arrow( - label: str, start: int, end: int, direction: str, i: int -) -> str: - """Render individual arrow. - - label (str): Dependency label. - start (int): Index of start word. - end (int): Index of end word. - direction (str): Arrow direction, 'left' or 'right'. - i (int): Unique ID, typically arrow index. - RETURNS (str): Rendered SVG markup. - """ - - arc = get_arc(start + 10, 50, 5, end + 10) - arrowhead = get_arrowhead(direction, start + 10, 50, end + 10) - label_side = "right" if direction == "rtl" else "left" - return TPL_DEP_ARCS.format( - id=0, - i=0, - stroke=2, - head=arrowhead, - label=label, - label_side=label_side, - arc=arc, - ) - - -def get_arc(x_start: int, y: int, y_curve: int, x_end: int) -> str: - """Render individual arc. - - x_start (int): X-coordinate of arrow start point. - y (int): Y-coordinate of arrow start and end point. - y_curve (int): Y-corrdinate of Cubic Bézier y_curve point. - x_end (int): X-coordinate of arrow end point. - RETURNS (str): Definition of the arc path ('d' attribute). 
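- Example: get_arc(10, 50, 5, 110) returns "M10,50 C10,5 110,5 110,50", a cubic Bézier from (10, 50) to (110, 50) with both control points at y = 5.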
- """ - template = "M{x},{y} C{x},{c} {e},{c} {e},{y}" - return template.format(x=x_start, y=y, c=y_curve, e=x_end) - - -def get_arrowhead(direction: str, x: int, y: int, end: int) -> str: - """Render individual arrow head. - - direction (str): Arrow direction, 'left' or 'right'. - x (int): X-coordinate of arrow start point. - y (int): Y-coordinate of arrow start and end point. - end (int): X-coordinate of arrow end point. - RETURNS (str): Definition of the arrow head path ('d' attribute). - """ - arrow_width = 6 - if direction == "left": - p1, p2, p3 = (x, x - arrow_width + 2, x + arrow_width - 2) - else: - p1, p2, p3 = (end, end + arrow_width - 2, end - arrow_width + 2) - return f"M{p1},{y + 2} L{p2},{y - arrow_width} {p3},{y - arrow_width}" - - -def render_sentence_custom(unmatched_list: Dict, nlp): - arcs_svg = [] - doc = nlp(unmatched_list["sentence"]) - - x_value_counter = 10 - index_counter = 0 - svg_words = [] - words_under_arc = [] - direction_current = "rtl" - - if unmatched_list["cur_word_index"] < unmatched_list["target_word_index"]: - min_index = unmatched_list["cur_word_index"] - max_index = unmatched_list["target_word_index"] - direction_current = "left" - else: - max_index = unmatched_list["cur_word_index"] - min_index = unmatched_list["target_word_index"] - for i, token in enumerate(doc): - word = str(token) - word = word + " " - pixel_x_length = get_pil_text_size(word, 16, 'arial.ttf')[0] - svg_words.append(TPL_DEP_WORDS.format(text=word, tag="", x=x_value_counter, y=70)) - if min_index <= index_counter <= max_index: - words_under_arc.append(x_value_counter) - if index_counter < max_index - 1: - x_value_counter += 50 - index_counter += 1 - x_value_counter += pixel_x_length + 4 - - arcs_svg.append(render_arrow(unmatched_list['dep'], words_under_arc[0], words_under_arc[-1], direction_current, i)) - - content = "".join(svg_words) + "".join(arcs_svg) - - full_svg = TPL_DEP_SVG.format( - id=0, - width=1200, # 600 - height=75, # 125 - color="#00000", - bg="#ffffff", - font="Arial", - content=content, - dir="ltr", - lang="en", - ) - return full_svg diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/criterion.py b/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/criterion.py deleted file mode 100644 index f4d5b71242f87c6f67463f9c31f873a742f3e5c7..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/modeling/criterion.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py -# Copyright (c) Meta Platforms, Inc. All Rights Reserved - -""" -MaskFormer criterion. -""" -import torch -import torch.nn.functional as F -from torch import nn - -from detectron2.utils.comm import get_world_size - -from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list - - -def dice_loss(inputs, targets, num_masks): - """ - Compute the DICE loss, similar to generalized IOU for masks - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). 
- """ - inputs = inputs.sigmoid() - inputs = inputs.flatten(1) - numerator = 2 * (inputs * targets).sum(-1) - denominator = inputs.sum(-1) + targets.sum(-1) - loss = 1 - (numerator + 1) / (denominator + 1) - return loss.sum() / num_masks - - -def sigmoid_focal_loss( - inputs, targets, num_masks, alpha: float = 0.25, gamma: float = 2 -): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - return loss.mean(1).sum() / num_masks - - -class SetCriterion(nn.Module): - """This class computes the loss for DETR. - The process happens in two steps: - 1) we compute hungarian assignment between ground truth boxes and the outputs of the model - 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) - """ - - def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses): - """Create the criterion. - Parameters: - num_classes: number of object categories, omitting the special no-object category - matcher: module able to compute a matching between targets and proposals - weight_dict: dict containing as key the names of the losses and as values their relative weight. - eos_coef: relative classification weight applied to the no-object category - losses: list of all the losses to be applied. See get_loss for list of available losses. 
- """ - super().__init__() - self.num_classes = num_classes - self.matcher = matcher - self.weight_dict = weight_dict - self.eos_coef = eos_coef - self.losses = losses - if eos_coef > 0: - - empty_weight = torch.ones(self.num_classes + 1) - - empty_weight[-1] = self.eos_coef - self.register_buffer("empty_weight", empty_weight) - self.use_ignore_idx = False - else: - self.use_ignore_idx = True - self.cur_target = [] - - def loss_labels(self, outputs, targets, indices, num_masks): - """Classification loss (NLL) - targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] - """ - assert "pred_logits" in outputs - src_logits = outputs["pred_logits"] - - idx = self._get_src_permutation_idx(indices) - target_classes_o = torch.cat( - [t["labels"][J] for t, (_, J) in zip(targets, indices)] - ) - target_classes = torch.full( - src_logits.shape[:2], - self.num_classes, - dtype=torch.int64, - device=src_logits.device, - ) - target_classes[idx] = target_classes_o - if self.use_ignore_idx: - loss_ce = F.cross_entropy( - src_logits.transpose(1, 2), - target_classes, - ignore_index=self.num_classes, - ) - else: - if "empty_weight" in outputs: - empty_weight = torch.cat( - [outputs["empty_weight"], self.empty_weight[-1:]] - ).detach() - else: - empty_weight = self.empty_weight - loss_ce = F.cross_entropy( - src_logits.transpose(1, 2), target_classes, empty_weight - ) - losses = {"loss_ce": loss_ce} - return losses - - def loss_masks(self, outputs, targets, indices, num_masks): - """Compute the losses related to the masks: the focal loss and the dice loss. - targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] - """ - assert "pred_masks" in outputs - - src_idx = self._get_src_permutation_idx(indices) - tgt_idx = self._get_tgt_permutation_idx(indices) - src_masks = outputs["pred_masks"] - src_masks = src_masks[src_idx] - masks = [t["masks"] for t in targets] - # TODO use valid to mask invalid areas due to padding in loss - target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() - target_masks = target_masks.to(src_masks) - target_masks = target_masks[tgt_idx] - - # upsample predictions to the target size - src_masks = F.interpolate( - src_masks[:, None], - size=target_masks.shape[-2:], - mode="bilinear", - align_corners=False, - ) - src_masks = src_masks[:, 0].flatten(1) - - target_masks = target_masks.flatten(1) - target_masks = target_masks.view(src_masks.shape) - losses = { - "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_masks), - "loss_dice": dice_loss(src_masks, target_masks, num_masks), - } - return losses - - def _get_src_permutation_idx(self, indices): - # permute predictions following indices - batch_idx = torch.cat( - [torch.full_like(src, i) for i, (src, _) in enumerate(indices)] - ) - src_idx = torch.cat([src for (src, _) in indices]) - return batch_idx, src_idx - - def _get_tgt_permutation_idx(self, indices): - # permute targets following indices - batch_idx = torch.cat( - [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)] - ) - tgt_idx = torch.cat([tgt for (_, tgt) in indices]) - return batch_idx, tgt_idx - - def get_loss(self, loss, outputs, targets, indices, num_masks): - loss_map = {"labels": self.loss_labels, "masks": self.loss_masks} - assert loss in loss_map, f"do you really want to compute {loss} loss?" - return loss_map[loss](outputs, targets, indices, num_masks) - - def forward(self, outputs, targets): - """This performs the loss computation. 
- Parameters: - outputs: dict of tensors, see the output specification of the model for the format - targets: list of dicts, such that len(targets) == batch_size. - The expected keys in each dict depends on the losses applied, see each loss' doc - """ - outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"} - - # Retrieve the matching between the outputs of the last layer and the targets - indices = self.matcher(outputs_without_aux, targets) - - # Compute the average number of target boxes accross all nodes, for normalization purposes - num_masks = sum(len(t["labels"]) for t in targets) - num_masks = torch.as_tensor( - [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device - ) - if is_dist_avail_and_initialized(): - torch.distributed.all_reduce(num_masks) - num_masks = torch.clamp(num_masks / get_world_size(), min=1).item() - - # Compute all the requested losses - losses = {} - for loss in self.losses: - losses.update(self.get_loss(loss, outputs, targets, indices, num_masks)) - - # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. - if "aux_outputs" in outputs: - for i, aux_outputs in enumerate(outputs["aux_outputs"]): - indices = self.matcher(aux_outputs, targets) - for loss in self.losses: - l_dict = self.get_loss( - loss, aux_outputs, targets, indices, num_masks - ) - l_dict = {k + f"_{i}": v for k, v in l_dict.items()} - losses.update(l_dict) - - return losses - - def clean_buffer(self): - self.cur_target = [] diff --git a/spaces/mohitmayank/EmojiFinder/README.md b/spaces/mohitmayank/EmojiFinder/README.md deleted file mode 100644 index 87afcccbf8e4d4a5a77f2c1e581b1f81b4ff5028..0000000000000000000000000000000000000000 --- a/spaces/mohitmayank/EmojiFinder/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: EmojiFinder 🕵 -emoji: 🐨 -colorFrom: gray -colorTo: purple -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/mueller-franzes/medfusion-app/tests/losses/test_lpips.py b/spaces/mueller-franzes/medfusion-app/tests/losses/test_lpips.py deleted file mode 100644 index 606fbac8362a0a491fc35d3e12a0394a7509829d..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/tests/losses/test_lpips.py +++ /dev/null @@ -1,37 +0,0 @@ - - -import torch -from medical_diffusion.loss.perceivers import LPIPS -from medical_diffusion.data.datasets import AIROGSDataset, SimpleDataset3D - -loss = LPIPS(normalize=False) -torch.manual_seed(0) - -# input = torch.randn((1, 3, 16, 128, 128)) # 3D - 1 channel -# input = torch.randn((1, 1, 128, 128)) # 2D - 1 channel -# input = torch.randn((1, 3, 128, 128)) # 2D - 3 channel - -# target = input/2 - -# print(loss(input, target)) - - -# ds = AIROGSDataset( -# crawler_ext='jpg', -# image_resize=(256, 256), -# image_crop=(256, 256), -# path_root='/mnt/hdd/datasets/eye/AIROGS/data/', # '/home/gustav/Documents/datasets/AIROGS/dataset', '/mnt/hdd/datasets/eye/AIROGS/data/' -# ) -ds = SimpleDataset3D( - crawler_ext='nii.gz', - image_resize=None, - image_crop=None, - flip=True, - path_root='/mnt/hdd/datasets/breast/DUKE/dataset_lr_256_256_32', - use_znorm=True - ) - -input = ds[0]['source'][None] - -target = torch.randn_like(input) -print(loss(input, target)) \ No newline at end of file diff --git a/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/transforms.py b/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/transforms.py deleted file mode 100644 index 20251ef28bc8a9c2b599ee7365a5394ae568720e..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/genious_bgremover/carvekit/ml/arch/fba_matting/transforms.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Modified by Nikita Selin (OPHoperHPO)[https://github.com/OPHoperHPO]. 
-Source url: https://github.com/MarcoForte/FBA_Matting -License: MIT License -""" -import cv2 -import numpy as np - -group_norm_std = [0.229, 0.224, 0.225] -group_norm_mean = [0.485, 0.456, 0.406] - - -def dt(a): - return cv2.distanceTransform((a * 255).astype(np.uint8), cv2.DIST_L2, 0) - - -def trimap_transform(trimap): - h, w = trimap.shape[0], trimap.shape[1] - - clicks = np.zeros((h, w, 6)) - for k in range(2): - if np.count_nonzero(trimap[:, :, k]) > 0: - dt_mask = -dt(1 - trimap[:, :, k]) ** 2 - L = 320 - clicks[:, :, 3 * k] = np.exp(dt_mask / (2 * ((0.02 * L) ** 2))) - clicks[:, :, 3 * k + 1] = np.exp(dt_mask / (2 * ((0.08 * L) ** 2))) - clicks[:, :, 3 * k + 2] = np.exp(dt_mask / (2 * ((0.16 * L) ** 2))) - - return clicks - - -def groupnorm_normalise_image(img, format="nhwc"): - """ - Accept rgb in range 0,1 - """ - if format == "nhwc": - for i in range(3): - img[..., i] = (img[..., i] - group_norm_mean[i]) / group_norm_std[i] - else: - for i in range(3): - img[..., i, :, :] = ( - img[..., i, :, :] - group_norm_mean[i] - ) / group_norm_std[i] - - return img diff --git a/spaces/mygyasir/genious_bgremover/carvekit/web/routers/__init__.py b/spaces/mygyasir/genious_bgremover/carvekit/web/routers/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/nazneen/model-usage/README.md b/spaces/nazneen/model-usage/README.md deleted file mode 100644 index f2f55f35fc8cf4dec94e38cc3308a0db4c0d390b..0000000000000000000000000000000000000000 --- a/spaces/nazneen/model-usage/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Model Usage -emoji: ⏱️ -colorFrom: pink -colorTo: blue -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/AutoCAD Plant 3D 2015 X86 X64 Torrent [PATCHED] Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/AutoCAD Plant 3D 2015 X86 X64 Torrent [PATCHED] Download.md deleted file mode 100644 index 9aa0795eefeb4e94924315a580758c461a2b4c3e..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/AutoCAD Plant 3D 2015 X86 X64 Torrent [PATCHED] Download.md +++ /dev/null @@ -1,135 +0,0 @@ -
                -

                AutoCAD Plant 3D 2015 X86 X64 Torrent Download

                -

Are you looking for a powerful, easy-to-use tool for designing and modeling plants of all sizes? Do you want to create accurate, realistic plant layouts, piping diagrams, isometrics, and reports? If so, you might be interested in AutoCAD Plant 3D 2015, software designed specifically for plant design and engineering.

                -

But how can you get this software without spending a fortune? One way is torrent download, a method of sharing files over the internet. Torrent download can give you access to AutoCAD Plant 3D 2015 for free or at a low cost. However, it also comes with disadvantages and risks that you should be aware of before using it.

                -

                -

In this article, we will explain what AutoCAD Plant 3D 2015 is, what its features and system requirements are, why you might use torrent download for it, how to download and install it from a torrent, and how to use it for plant design. We will also answer some frequently asked questions about AutoCAD Plant 3D 2015 and torrent download. By the end of this article, you will have a clear idea of whether AutoCAD Plant 3D 2015 X86 X64 Torrent Download is worth it.

                -

                What is AutoCAD Plant 3D 2015?

                -

AutoCAD Plant 3D 2015 is part of the Autodesk AutoCAD family. It is an industry-specific toolset for creating and editing plant layouts, piping diagrams, orthographics, isometrics, and reports, and it enables you to collaborate with other project teams in a cloud-based common data environment. With AutoCAD Plant 3D 2015, you can:

                -
                  -
                • Create and modify P&IDs (piping and instrumentation diagrams) with in-context commands and industry-standard symbol libraries.
                • -
                • Create and edit 3D models of plants with parametric equipment, structural elements, piping components, valves, supports, and more.
                • -
                • Extract piping orthographics and isometrics automatically from the 3D model with customizable settings and annotations.
                • -
                • Generate reports such as bills of materials, line lists, valve lists, etc. from the P&ID or the 3D model.
                • -
                • Validate data consistency across P&IDs and the 3D model with user-definable rules.
                • -
                • Collaborate securely with other project stakeholders in a cloud-based common data environment using BIM (building information modeling) workflows.
                • -
                -

                Features of AutoCAD Plant 3D 2015

                -

                Some of the features of AutoCAD Plant 3D 2015 are:

                -
                  -
                • Powerful plant design tool: You can create realistic and accurate plant layouts and models with a comprehensive set of tools that are tailored for plant design. You can also customize the tools according to your preferences and standards.
                • -
                • Flexible and affordable plans: You can subscribe to AutoCAD Plant 3D 2015 for one or multiple years with different term options. You can also add or remove seats as needed. You can access the latest updates and support as well as cloud services with your subscription.
                • -
                • Cloud collaboration: You can share and manage your plant design data in a cloud-based common data environment using BIM 360 Design. You can also access your data from any device and location with the Autodesk Desktop Connector.
                • -
                • Data exchange: You can import and export data from various sources and formats, such as AutoCAD P&ID, Revit, Navisworks, Excel, PDF, etc. You can also use open APIs (application programming interfaces) to integrate AutoCAD Plant 3D 2015 with other applications.
                • -
                • Customization and extensibility: You can customize AutoCAD Plant 3D 2015 to suit your specific needs and standards. You can create and edit catalogs, specs, templates, symbols, rules, etc. You can also use Autodesk Exchange Apps to find and download additional content and tools for AutoCAD Plant 3D 2015.
                • -
                -

                System requirements for AutoCAD Plant 3D 2015

                -

                To run AutoCAD Plant 3D 2015 smoothly on your computer, you need to meet the following minimum system requirements:

-
Operating system: Windows 7/8/8.1 (32-bit or 64-bit)
Memory (RAM): 4 GB (8 GB recommended)
Disk space: 8 GB (12 GB recommended)
Processor: Intel Pentium 4 or AMD Athlon 64 (3 GHz or higher)
Display resolution: 1280 x 1024 (1600 x 1050 or higher recommended)
Graphics card: Windows display adapter capable of DirectX 9.0c (DirectX 11 compliant card recommended)
                -

                Why use torrent download for AutoCAD Plant 3D 2015?

                -

Torrent download is a way of downloading files from the internet using a peer-to-peer (P2P) network. A P2P network is a network of computers that share files with each other without relying on a central server. A torrent file is a small file that contains information about the larger file you want to download, such as its name, size, location, etc. A torrent client is a program that reads the torrent file, connects you to other computers that have the file or parts of it, downloads those pieces, and assembles them into the complete file on your computer.
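To make the torrent file's role concrete, here is a minimal Python sketch of decoding the bencoded metadata inside a .torrent file. Bencoding is the simple encoding BitTorrent uses for this metadata, and the sketch shows that the torrent file only describes the payload (its name, piece size, and per-piece hashes) rather than containing it. This is an illustration, not a production parser, and the file name is a placeholder.

```python
def bdecode(data: bytes, i: int = 0):
    """Decode one bencoded value starting at offset i; return (value, next offset)."""
    c = data[i:i + 1]
    if c == b"i":  # integer: i<digits>e
        end = data.index(b"e", i)
        return int(data[i + 1:end]), end + 1
    if c == b"l":  # list: l<values>e
        i += 1
        items = []
        while data[i:i + 1] != b"e":
            item, i = bdecode(data, i)
            items.append(item)
        return items, i + 1
    if c == b"d":  # dictionary: d<key><value>...e
        i += 1
        out = {}
        while data[i:i + 1] != b"e":
            key, i = bdecode(data, i)
            out[key], i = bdecode(data, i)
        return out, i + 1
    colon = data.index(b":", i)  # byte string: <length>:<bytes>
    length = int(data[i:colon])
    return data[colon + 1:colon + 1 + length], colon + 1 + length


with open("example.torrent", "rb") as f:  # placeholder file name
    meta, _ = bdecode(f.read())
info = meta[b"info"]
# "pieces" is a flat byte string of 20-byte SHA-1 hashes, one per payload piece.
print(info[b"name"], info[b"piece length"], len(info[b"pieces"]) // 20, "pieces")
```

A real client uses this metadata to request pieces from peers and to verify each downloaded piece against its hash before accepting it.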

                -

                Torrent download can be used for downloading any type of file, including software like AutoCAD Plant 3D 2015. There are some advantages and disadvantages of using torrent download for AutoCAD Plant 3D 2015, which we will discuss below.

                -

                Advantages of torrent download

                -

                Some of the advantages of using torrent download for AutoCAD Plant 3D 2015 are:

                -
                  -
• Fast and efficient: Torrent download can be faster and more efficient than a direct download because it pulls the file from multiple sources over multiple connections. You can also pause and resume the download at any time without losing progress.
                • -
                • Cheap or free: Torrent download can help you save money by accessing AutoCAD Plant 3D 2015 for free or at a low cost. You do not need to pay for a subscription or a license to use the software.
                • -
                • Easy and convenient: Torrent download can be easy and convenient because you only need a torrent client and a torrent file to start the download. You do not need to register or log in to any website or service to use the software.
                • -
                • Variety and availability: Torrent download can offer you a variety of options and versions of AutoCAD Plant 3D 2015 to choose from. You can also find older or newer versions of the software that may not be available on the official website or other sources.
                • -
                -

                Disadvantages and risks of torrent download

                -

                Some of the disadvantages and risks of using torrent download for AutoCAD Plant 3D 2015 are:

                -
                  -
                • Illegal and unethical: Torrent download can be illegal and unethical because it violates the intellectual property rights of the software developer. By downloading AutoCAD Plant 3D 2015 from torrent, you are not supporting the developer or paying for their work. You may also face legal consequences if you are caught downloading or using pirated software.
                • -
                • Unsafe and unreliable: Torrent download can be unsafe and unreliable because it exposes your computer to viruses, malware, spyware, etc. that may harm your system or steal your data. You may also download corrupted, incomplete, or fake files that may not work properly or at all. You may also face technical issues or errors while downloading or installing the software.
                • -
                • Unfair and unprofessional: Torrent download can be unfair and unprofessional because it deprives you of the benefits and features that come with the official version of AutoCAD Plant 3D 2015. You may not be able to access the latest updates, support, cloud services, etc. that are available for the subscribers or licensed users. You may also face compatibility or performance issues with other software or hardware that are not tested or certified for the pirated version of AutoCAD Plant 3D 2015.
                • -
                -

                How to download and install AutoCAD Plant 3D 2015 from torrent?

                -

                If you still want to use torrent download for AutoCAD Plant 3D 2015, you need to follow some steps to download and install it from torrent. Here are the steps:

                -

                Steps to download AutoCAD Plant 3D 2015 from torrent

                -
                  -
                1. Find a reliable and reputable torrent website that offers AutoCAD Plant 3D 2015 X86 X64 Torrent Download. Some examples are The Pirate Bay, Kickass Torrents, RARBG, etc. However, be careful of the risks and legality of using these websites.
                2. -
                3. Search for AutoCAD Plant 3D 2015 on the torrent website and choose the torrent file that matches your system requirements and preferences. Check the file size, seeders, leechers, comments, ratings, etc. to ensure the quality and safety of the file.
                4. -
                5. Download the torrent file to your computer or copy the magnet link to your clipboard.
                6. -
                7. Download and install a torrent client on your computer if you do not have one already. Some examples are uTorrent, BitTorrent, qBittorrent, etc. However, be careful of the risks and legality of using these software.
                8. -
                9. Open the torrent file with your torrent client or paste the magnet link into your torrent client.
                10. -
                11. Select the destination folder for the downloaded file and start the download process.
                12. -
                13. Wait for the download to complete. The time may vary depending on your internet speed, file size, seeders, leechers, etc.
                14. -
                -

                Steps to install AutoCAD Plant 3D 2015 from torrent

                -
                  -
                1. Locate the downloaded file on your computer and extract it if it is compressed in a ZIP or RAR format.
                2. -
                3. Open the extracted folder and look for the setup file or the ISO file of AutoCAD Plant 3D 2015.
                4. -
                5. If it is a setup file, double-click on it and follow the instructions on the screen to install AutoCAD Plant 3D 2015 on your computer.
                6. -
                7. If it is an ISO file, you need to mount it on a virtual drive using a software like Daemon Tools or PowerISO. Then, open the virtual drive and run the setup file as above.
                8. -
                9. During the installation process, you may need to enter a serial number or a product key for AutoCAD Plant 3D 2015. You can find these in a text file or a readme file in the downloaded folder. However, be careful of the risks and legality of using these codes.
                10. -
                11. You may also need to apply a crack or a patch for AutoCAD Plant 3D 2015 to bypass the activation or registration process. You can find these in a separate folder or a subfolder in the downloaded folder. However, be careful of the risks and legality of using these files.
                12. -
                13. After the installation is complete, you can launch AutoCAD Plant 3D 2015 from your desktop or start menu and start using it for plant design.
                14. -
                -

                How to use AutoCAD Plant 3D 2015 for plant design?

                -

                Now that you have downloaded and installed AutoCAD Plant 3D 2015 from torrent, you may wonder how to use it for plant design. Here are some basic steps to get you started:

                -

                Basic workflow of AutoCAD Plant 3D 2015

                -
                  -
                1. Create a new project or open an existing project in AutoCAD Plant 3D 2015. A project is a collection of files and settings that define your plant design data and environment.
                2. -
3. Create or edit P&IDs using the P&ID workspace. A P&ID is a schematic diagram that shows the piping and instrumentation of a plant system. You can use various tools and commands to draw P&ID objects such as lines, valves, equipment, and instruments, add data and annotations to your P&ID objects, and validate the P&ID for errors and inconsistencies.
                4. -
                5. Create or edit 3D models using the Plant 3D workspace. A 3D model is a realistic representation of the physical layout and components of a plant system. You can use various tools and commands to create and modify 3D objects such as equipment, piping, structural elements, supports, etc. You can also add data and annotations to your 3D objects. You can also link your 3D model to your P&ID and synchronize the data between them.
                6. -
                7. Extract orthographics and isometrics from your 3D model using the Ortho and Iso workspace. Orthographics are 2D drawings that show the elevation, plan, and section views of your 3D model. Isometrics are 2D drawings that show the piping layout of your 3D model in a 3D perspective. You can use various tools and commands to generate, edit, and annotate orthographics and isometrics from your 3D model. You can also customize the settings and styles of your orthographics and isometrics.
                8. -
                9. Generate reports from your P&ID or your 3D model using the Report Creator. Reports are documents that contain information about your plant design data, such as bills of materials, line lists, valve lists, etc. You can use various tools and commands to create, edit, and export reports from your P&ID or your 3D model. You can also customize the settings and formats of your reports.
                10. -
                11. Collaborate with other project teams using BIM 360 Design. BIM 360 Design is a cloud-based service that allows you to share and manage your plant design data in a common data environment. You can use various tools and commands to upload, download, view, comment, and compare your plant design data with other project stakeholders. You can also use BIM 360 Design to access your plant design data from any device and location.
                12. -
                -

                Tips and tricks for using AutoCAD Plant 3D 2015

                -

                Here are some tips and tricks that can help you use AutoCAD Plant 3D 2015 more effectively and efficiently:

                -
                  -
• Use the Project Manager to organize and manage your project files and settings. You can use it to create, open, close, copy, rename, delete, or back up your project files, and to edit project settings such as units, catalogs, specs, and templates.
                • -
                • Use the Properties Palette to view and edit the properties of your P&ID or 3D objects. You can use the Properties Palette to change the attributes, data, dimensions, styles, etc. of your P&ID or 3D objects. You can also use the Properties Palette to filter or select objects based on their properties.
                • -
                • Use the Data Manager to view and edit the data of your P&ID or 3D objects in a spreadsheet format. You can use the Data Manager to add, modify, delete, or export data of your P&ID or 3D objects. You can also use the Data Manager to run queries or reports on your data.
                • -
                • Use the Spec Editor to create and edit catalogs and specs for your piping components. You can use the Spec Editor to define the properties, dimensions, materials, connections, etc. of your piping components. You can also use the Spec Editor to import or export catalogs and specs from other sources.
                • -
                • Use the Project Setup to customize the rules, styles, settings, etc. of your project. You can use the Project Setup to define the rules for validating your P&ID or 3D model data consistency. You can also use the Project Setup to define the styles for generating orthographics and isometrics from your 3D model. You can also use the Project Setup to define the settings for generating reports from your P&ID or 3D model.
                • -
                • Use the Help menu to access the online documentation, tutorials, videos, forums, etc. for AutoCAD Plant 3D 2015. You can use the Help menu to learn more about the features and functions of AutoCAD Plant 3D 2015. You can also use the Help menu to find answers to your questions or problems with AutoCAD Plant 3D 2015.
                • -
                -

                Conclusion

                -

                In conclusion, AutoCAD Plant 3D 2015 is a software that is designed for plant design and engineering. It allows you to create and edit plant layouts, piping diagrams, orthographics, isometrics, and reports. It also enables you to collaborate with other project teams in a cloud-based common data environment.

                -

                Torrent download is a way of downloading files from the internet using a peer-to-peer network. Torrent download can help you access AutoCAD Plant 3D 2015 for free or at a low cost. However, torrent download also has some disadvantages and risks that you should consider before using it.

                -

                If you decide to use torrent download for AutoCAD Plant 3D 2015, you need to follow some steps to download and install it from torrent. You also need to follow some steps to use it for plant design. You can also use some tips and tricks to improve your experience with AutoCAD Plant 3D 2015.

                -

                We hope that this article has given you a clear idea of whether AutoCAD Plant 3D 2015 X86 X64 Torrent Download is worth it or not. If you have any questions or feedback, please feel free to leave a comment below.

                -

                FAQs

                -

                Here are some frequently asked questions about AutoCAD Plant 3D 2015 and torrent download:

                -
                  -
                • Q: Is AutoCAD Plant 3D 2015 compatible with other Autodesk software?
                • -
• A: Yes, AutoCAD Plant 3D 2015 is compatible with other Autodesk software, such as AutoCAD P&ID, Revit, Navisworks, and BIM 360 Design. You can exchange data and information among these applications using various formats and methods.
                • -
                • Q: How much does AutoCAD Plant 3D 2015 cost?
                • -
                • A: The official price of AutoCAD Plant 3D 2015 is $6,295 for a one-year subscription or $16,065 for a three-year subscription. However, you may find different prices or discounts from other sources or resellers.
                • -
                • Q: Is torrent download legal?
                • -
                • A: Torrent download itself is not illegal, but downloading or sharing copyrighted content without permission or license is illegal and unethical. You may face legal actions or penalties if you are caught downloading or using pirated software.
                • -
                • Q: How can I protect my computer from viruses or malware when using torrent download?
                • -
                • A: You can protect your computer from viruses or malware when using torrent download by following some precautions, such as:
                • -
                    -
                  • Use a reliable and reputable torrent website and torrent client.
                  • -
                  • Check the file size, seeders, leechers, comments, ratings, etc. of the torrent file before downloading it.
                  • -
• Scan the downloaded file with antivirus or anti-malware software before opening it, and verify its checksum against one published by a trusted source (see the sketch after this FAQ).
                  • -
                  • Avoid opening or running any suspicious or unknown files or programs that come with the downloaded file.
                  • -
                  • Use a VPN (virtual private network) or a proxy to hide your IP address and location when using torrent download.
                  • -
                  -
                • Q: How can I update AutoCAD Plant 3D 2015 if I download it from torrent?
                • -
                • A: You may not be able to update AutoCAD Plant 3D 2015 if you download it from torrent, because the official updates may not be compatible with the pirated version of the software. You may also lose the crack or patch that you applied to activate the software. You may need to find and download another torrent file that contains the updated version of AutoCAD Plant 3D 2015.
                • -
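As a concrete illustration of the checksum precaution mentioned in the FAQ above, here is a minimal Python sketch that verifies a downloaded file against a published SHA-256 digest. The file name and expected digest below are placeholders, not real values.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in 1 MiB chunks so large downloads need not fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "<digest published by a trusted source>"  # placeholder
actual = sha256_of("downloaded-file.iso")  # placeholder path
print("match" if actual == expected else f"mismatch: {actual}")
```

If the digests differ, the file was corrupted or tampered with in transit and should not be opened.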

                \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/datasets/dataset_type.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/datasets/dataset_type.py deleted file mode 100644 index ed8f8f299af96847d9d16a77920429fe0195c526..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/datasets/dataset_type.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from enum import Enum - - -class DatasetType(Enum): - """ - Dataset type, mostly used for datasets that contain data to bootstrap models on - """ - - VIDEO_LIST = "video_list" diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_roi_align_rotated.py b/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_roi_align_rotated.py deleted file mode 100644 index 7323d7d5a86816f337571221313c428238c439f4..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tests/layers/test_roi_align_rotated.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import unittest -import cv2 -import torch -from torch.autograd import Variable, gradcheck - -from detectron2.layers.roi_align import ROIAlign -from detectron2.layers.roi_align_rotated import ROIAlignRotated - -logger = logging.getLogger(__name__) - - -class ROIAlignRotatedTest(unittest.TestCase): - def _box_to_rotated_box(self, box, angle): - return [ - (box[0] + box[2]) / 2.0, - (box[1] + box[3]) / 2.0, - box[2] - box[0], - box[3] - box[1], - angle, - ] - - def _rot90(self, img, num): - num = num % 4 # note: -1 % 4 == 3 - for _ in range(num): - img = img.transpose(0, 1).flip(0) - return img - - def test_forward_output_0_90_180_270(self): - for i in range(4): - # i = 0, 1, 2, 3 corresponding to 0, 90, 180, 270 degrees - img = torch.arange(25, dtype=torch.float32).reshape(5, 5) - """ - 0 1 2 3 4 - 5 6 7 8 9 - 10 11 12 13 14 - 15 16 17 18 19 - 20 21 22 23 24 - """ - box = [1, 1, 3, 3] - rotated_box = self._box_to_rotated_box(box=box, angle=90 * i) - - result = self._simple_roi_align_rotated(img=img, box=rotated_box, resolution=(4, 4)) - - # Here's an explanation for 0 degree case: - # point 0 in the original input lies at [0.5, 0.5] - # (the center of bin [0, 1] x [0, 1]) - # point 1 in the original input lies at [1.5, 0.5], etc. 
- # since the resolution is (4, 4) that divides [1, 3] x [1, 3] - # into 4 x 4 equal bins, - # the top-left bin is [1, 1.5] x [1, 1.5], and its center - # (1.25, 1.25) lies at the 3/4 position - # between point 0 and point 1, point 5 and point 6, - # point 0 and point 5, point 1 and point 6, so it can be calculated as - # 0.25*(0*0.25+1*0.75)+(5*0.25+6*0.75)*0.75 = 4.5 - result_expected = torch.tensor( - [ - [4.5, 5.0, 5.5, 6.0], - [7.0, 7.5, 8.0, 8.5], - [9.5, 10.0, 10.5, 11.0], - [12.0, 12.5, 13.0, 13.5], - ] - ) - # This is also an upsampled version of [[6, 7], [11, 12]] - - # When the box is rotated by 90 degrees CCW, - # the result would be rotated by 90 degrees CW, thus it's -i here - result_expected = self._rot90(result_expected, -i) - - assert torch.allclose(result, result_expected) - - def test_resize(self): - H, W = 30, 30 - input = torch.rand(H, W) * 100 - box = [10, 10, 20, 20] - rotated_box = self._box_to_rotated_box(box, angle=0) - output = self._simple_roi_align_rotated(img=input, box=rotated_box, resolution=(5, 5)) - - input2x = cv2.resize(input.numpy(), (W // 2, H // 2), interpolation=cv2.INTER_LINEAR) - input2x = torch.from_numpy(input2x) - box2x = [x / 2 for x in box] - rotated_box2x = self._box_to_rotated_box(box2x, angle=0) - output2x = self._simple_roi_align_rotated(img=input2x, box=rotated_box2x, resolution=(5, 5)) - assert torch.allclose(output2x, output) - - def _simple_roi_align_rotated(self, img, box, resolution): - """ - RoiAlignRotated with scale 1.0 and 0 sample ratio. - """ - op = ROIAlignRotated(output_size=resolution, spatial_scale=1.0, sampling_ratio=0) - input = img[None, None, :, :] - - rois = [0] + list(box) - rois = torch.tensor(rois, dtype=torch.float32)[None, :] - result_cpu = op.forward(input, rois) - if torch.cuda.is_available(): - result_cuda = op.forward(input.cuda(), rois.cuda()) - assert torch.allclose(result_cpu, result_cuda.cpu()) - return result_cpu[0, 0] - - def test_empty_box(self): - img = torch.rand(5, 5) - out = self._simple_roi_align_rotated(img, [2, 3, 0, 0, 0], (7, 7)) - self.assertTrue((out == 0).all()) - - def test_roi_align_rotated_gradcheck_cpu(self): - dtype = torch.float64 - device = torch.device("cpu") - roi_align_rotated_op = ROIAlignRotated( - output_size=(5, 5), spatial_scale=0.5, sampling_ratio=1 - ).to(dtype=dtype, device=device) - x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True) - # roi format is (batch index, x_center, y_center, width, height, angle) - rois = torch.tensor( - [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]], - dtype=dtype, - device=device, - ) - - def func(input): - return roi_align_rotated_op(input, rois) - - assert gradcheck(func, (x,)), "gradcheck failed for RoIAlignRotated CPU" - assert gradcheck(func, (x.transpose(2, 3),)), "gradcheck failed for RoIAlignRotated CPU" - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_roi_align_rotated_gradient_cuda(self): - """ - Compute gradients for ROIAlignRotated with multiple bounding boxes on the GPU, - and compare the result with ROIAlign - """ - # torch.manual_seed(123) - dtype = torch.float64 - device = torch.device("cuda") - pool_h, pool_w = (5, 5) - - roi_align = ROIAlign(output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2).to( - device=device - ) - - roi_align_rotated = ROIAlignRotated( - output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2 - ).to(device=device) - - x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True) - # x_rotated 
= x.clone() won't work (will lead to grad_fun=CloneBackward)! - x_rotated = Variable(x.data.clone(), requires_grad=True) - - # roi_rotated format is (batch index, x_center, y_center, width, height, angle) - rois_rotated = torch.tensor( - [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]], - dtype=dtype, - device=device, - ) - - y_rotated = roi_align_rotated(x_rotated, rois_rotated) - s_rotated = y_rotated.sum() - s_rotated.backward() - - # roi format is (batch index, x1, y1, x2, y2) - rois = torch.tensor( - [[0, 0, 0, 9, 9], [0, 0, 5, 4, 9], [0, 5, 5, 9, 9]], dtype=dtype, device=device - ) - - y = roi_align(x, rois) - s = y.sum() - s.backward() - - assert torch.allclose( - x.grad, x_rotated.grad - ), "gradients for ROIAlign and ROIAlignRotated mismatch on CUDA" - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/noelshin/selfmask/networks/resnet_backbone.py b/spaces/noelshin/selfmask/networks/resnet_backbone.py deleted file mode 100644 index 498ae808f02f60f109eb2c2302e2783360eb6db4..0000000000000000000000000000000000000000 --- a/spaces/noelshin/selfmask/networks/resnet_backbone.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env python -# -*- coding:utf-8 -*- -# Author: Donny You(youansheng@gmail.com) - - -import torch.nn as nn -from networks.resnet_models import * - - -class NormalResnetBackbone(nn.Module): - def __init__(self, orig_resnet): - super(NormalResnetBackbone, self).__init__() - - self.num_features = 2048 - # take pretrained resnet, except AvgPool and FC - self.prefix = orig_resnet.prefix - self.maxpool = orig_resnet.maxpool - self.layer1 = orig_resnet.layer1 - self.layer2 = orig_resnet.layer2 - self.layer3 = orig_resnet.layer3 - self.layer4 = orig_resnet.layer4 - - def get_num_features(self): - return self.num_features - - def forward(self, x): - tuple_features = list() - x = self.prefix(x) - x = self.maxpool(x) - x = self.layer1(x) - tuple_features.append(x) - x = self.layer2(x) - tuple_features.append(x) - x = self.layer3(x) - tuple_features.append(x) - x = self.layer4(x) - tuple_features.append(x) - - return tuple_features - - -class DilatedResnetBackbone(nn.Module): - def __init__(self, orig_resnet, dilate_scale=8, multi_grid=(1, 2, 4)): - super(DilatedResnetBackbone, self).__init__() - - self.num_features = 2048 - from functools import partial - - if dilate_scale == 8: - orig_resnet.layer3.apply(partial(self._nostride_dilate, dilate=2)) - if multi_grid is None: - orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=4)) - else: - for i, r in enumerate(multi_grid): - orig_resnet.layer4[i].apply(partial(self._nostride_dilate, dilate=int(4 * r))) - - elif dilate_scale == 16: - if multi_grid is None: - orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=2)) - else: - for i, r in enumerate(multi_grid): - orig_resnet.layer4[i].apply(partial(self._nostride_dilate, dilate=int(2 * r))) - - # Take pretrained resnet, except AvgPool and FC - self.prefix = orig_resnet.prefix - self.maxpool = orig_resnet.maxpool - self.layer1 = orig_resnet.layer1 - self.layer2 = orig_resnet.layer2 - self.layer3 = orig_resnet.layer3 - self.layer4 = orig_resnet.layer4 - - def _nostride_dilate(self, m, dilate): - classname = m.__class__.__name__ - if classname.find('Conv') != -1: - # the convolution with stride - if m.stride == (2, 2): - m.stride = (1, 1) - if m.kernel_size == (3, 3): - m.dilation = (dilate // 2, dilate // 2) - m.padding = (dilate // 2, dilate // 2) - # other convoluions - else: - if m.kernel_size == (3, 3): - m.dilation = (dilate, 
dilate) - m.padding = (dilate, dilate) - - def get_num_features(self): - return self.num_features - - def forward(self, x): - tuple_features = list() - - x = self.prefix(x) - x = self.maxpool(x) - - x = self.layer1(x) - tuple_features.append(x) - x = self.layer2(x) - tuple_features.append(x) - x = self.layer3(x) - tuple_features.append(x) - x = self.layer4(x) - tuple_features.append(x) - - return tuple_features - - -def ResNetBackbone(backbone=None, width_multiplier=1.0, pretrained=None, multi_grid=None, norm_type='batchnorm'): - arch = backbone - - if arch == 'resnet18': - orig_resnet = resnet18(pretrained=pretrained) - arch_net = NormalResnetBackbone(orig_resnet) - arch_net.num_features = 512 - - elif arch == 'resnet18_dilated8': - orig_resnet = resnet18(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=8, multi_grid=multi_grid) - arch_net.num_features = 512 - - elif arch == 'resnet34': - orig_resnet = resnet34(pretrained=pretrained) - arch_net = NormalResnetBackbone(orig_resnet) - arch_net.num_features = 512 - - elif arch == 'resnet34_dilated8': - orig_resnet = resnet34(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=8, multi_grid=multi_grid) - arch_net.num_features = 512 - - elif arch == 'resnet34_dilated16': - orig_resnet = resnet34(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=16, multi_grid=multi_grid) - arch_net.num_features = 512 - - elif arch == 'resnet50': - orig_resnet = resnet50(pretrained=pretrained, width_multiplier=width_multiplier) - arch_net = NormalResnetBackbone(orig_resnet) - - elif arch == 'resnet50_dilated8': - orig_resnet = resnet50(pretrained=pretrained, width_multiplier=width_multiplier) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=8, multi_grid=multi_grid) - - elif arch == 'resnet50_dilated16': - orig_resnet = resnet50(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=16, multi_grid=multi_grid) - - elif arch == 'deepbase_resnet50': - if pretrained: - pretrained = 'models/backbones/pretrained/3x3resnet50-imagenet.pth' - orig_resnet = deepbase_resnet50(pretrained=pretrained) - arch_net = NormalResnetBackbone(orig_resnet) - - elif arch == 'deepbase_resnet50_dilated8': - if pretrained: - pretrained = 'models/backbones/pretrained/3x3resnet50-imagenet.pth' - # pretrained = "/home/gishin/Projects/DeepLearning/Oxford/cct/models/backbones/pretrained/3x3resnet50-imagenet.pth" - orig_resnet = deepbase_resnet50(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=8, multi_grid=multi_grid) - - elif arch == 'deepbase_resnet50_dilated16': - orig_resnet = deepbase_resnet50(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=16, multi_grid=multi_grid) - - elif arch == 'resnet101': - orig_resnet = resnet101(pretrained=pretrained) - arch_net = NormalResnetBackbone(orig_resnet) - - elif arch == 'resnet101_dilated8': - orig_resnet = resnet101(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=8, multi_grid=multi_grid) - - elif arch == 'resnet101_dilated16': - orig_resnet = resnet101(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=16, multi_grid=multi_grid) - - elif arch == 'deepbase_resnet101': - orig_resnet = deepbase_resnet101(pretrained=pretrained) - arch_net = NormalResnetBackbone(orig_resnet) - - elif arch == 'deepbase_resnet101_dilated8': - if pretrained: - pretrained = 
'backbones/backbones/pretrained/3x3resnet101-imagenet.pth' - orig_resnet = deepbase_resnet101(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=8, multi_grid=multi_grid) - - elif arch == 'deepbase_resnet101_dilated16': - orig_resnet = deepbase_resnet101(pretrained=pretrained) - arch_net = DilatedResnetBackbone(orig_resnet, dilate_scale=16, multi_grid=multi_grid) - - else: - raise Exception('Architecture undefined!') - - return arch_net diff --git a/spaces/oguzakif/video-object-remover/SiamMask/data/coco/pycocotools/mask.py b/spaces/oguzakif/video-object-remover/SiamMask/data/coco/pycocotools/mask.py deleted file mode 100644 index 3e4330b14f8efc412dc261df0ef6449424baad26..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/data/coco/pycocotools/mask.py +++ /dev/null @@ -1,104 +0,0 @@ -__author__ = 'tsungyi' - -#import pycocotools._mask as _mask -from . import _mask - -# Interface for manipulating masks stored in RLE format. -# -# RLE is a simple yet efficient format for storing binary masks. RLE -# first divides a vector (or vectorized image) into a series of piecewise -# constant regions and then for each piece simply stores the length of -# that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would -# be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1] -# (note that the odd counts are always the numbers of zeros). Instead of -# storing the counts directly, additional compression is achieved with a -# variable bitrate representation based on a common scheme called LEB128. -# -# Compression is greatest given large piecewise constant regions. -# Specifically, the size of the RLE is proportional to the number of -# *boundaries* in M (or for an image the number of boundaries in the y -# direction). Assuming fairly simple shapes, the RLE representation is -# O(sqrt(n)) where n is number of pixels in the object. Hence space usage -# is substantially lower, especially for large simple objects (large n). -# -# Many common operations on masks can be computed directly using the RLE -# (without need for decoding). This includes computations such as area, -# union, intersection, etc. All of these operations are linear in the -# size of the RLE, in other words they are O(sqrt(n)) where n is the area -# of the object. Computing these operations on the original mask is O(n). -# Thus, using the RLE can result in substantial computational savings. -# -# The following API functions are defined: -# encode - Encode binary masks using RLE. -# decode - Decode binary masks encoded via RLE. -# merge - Compute union or intersection of encoded masks. -# iou - Compute intersection over union between masks. -# area - Compute area of encoded masks. -# toBbox - Get bounding boxes surrounding encoded masks. -# frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask. -# -# Usage: -# Rs = encode( masks ) -# masks = decode( Rs ) -# R = merge( Rs, intersect=false ) -# o = iou( dt, gt, iscrowd ) -# a = area( Rs ) -# bbs = toBbox( Rs ) -# Rs = frPyObjects( [pyObjects], h, w ) -# -# In the API the following formats are used: -# Rs - [dict] Run-length encoding of binary masks -# R - dict Run-length encoding of binary mask -# masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order) -# iscrowd - [nx1] list of np.ndarray. 
1 indicates corresponding gt image has crowd region to ignore -# bbs - [nx4] Bounding box(es) stored as [x y w h] -# poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list) -# dt,gt - May be either bounding boxes or encoded masks -# Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel). -# -# Finally, a note about the intersection over union (iou) computation. -# The standard iou of a ground truth (gt) and detected (dt) object is -# iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt)) -# For "crowd" regions, we use a modified criteria. If a gt object is -# marked as "iscrowd", we allow a dt to match any subregion of the gt. -# Choosing gt' in the crowd gt that best matches the dt can be done using -# gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing -# iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt) -# For crowd gt regions we use this modified criteria above for the iou. -# -# To compile run "python setup.py build_ext --inplace" -# Please do not contact us for help with compiling. -# -# Microsoft COCO Toolbox. version 2.0 -# Data, paper, and tutorials available at: http://mscoco.org/ -# Code written by Piotr Dollar and Tsung-Yi Lin, 2015. -# Licensed under the Simplified BSD License [see coco/license.txt] - -iou = _mask.iou -merge = _mask.merge -frPyObjects = _mask.frPyObjects - -def encode(bimask): - if len(bimask.shape) == 3: - return _mask.encode(bimask) - elif len(bimask.shape) == 2: - h, w = bimask.shape - return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0] - -def decode(rleObjs): - if type(rleObjs) == list: - return _mask.decode(rleObjs) - else: - return _mask.decode([rleObjs])[:,:,0] - -def area(rleObjs): - if type(rleObjs) == list: - return _mask.area(rleObjs) - else: - return _mask.area([rleObjs])[0] - -def toBbox(rleObjs): - if type(rleObjs) == list: - return _mask.toBbox(rleObjs) - else: - return _mask.toBbox([rleObjs])[0] diff --git a/spaces/oguzakif/video-object-remover/SiamMask/experiments/siamrpn_resnet/test.sh b/spaces/oguzakif/video-object-remover/SiamMask/experiments/siamrpn_resnet/test.sh deleted file mode 100644 index 9e3e6c095a3f63b1624edd9e3387fd28b1bafbfb..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/experiments/siamrpn_resnet/test.sh +++ /dev/null @@ -1,23 +0,0 @@ -if [ -z "$3" ] - then - echo "Need input parameter!" 
- echo "Usage: bash `basename "$0"` \$MODEL \$DATASETi \$GPUID" - exit -fi - -source activate siammask -ROOT=`git rev-parse --show-toplevel` -export PYTHONPATH=$ROOT:$PYTHONPATH -export PYTHONPATH=$PWD:$PYTHONPATH - -mkdir -p logs - -model=$1 -dataset=$2 -gpu=$3 - -CUDA_VISIBLE_DEVICES=$gpu python -u $ROOT/tools/test.py \ - --config config.json \ - --resume $model \ - --dataset $dataset 2>&1 | tee logs/test.log - diff --git a/spaces/osanchik/PicFinder/app.py b/spaces/osanchik/PicFinder/app.py deleted file mode 100644 index ce27c867f3ce25dc877bb6eb56fea4f0357ab8ac..0000000000000000000000000000000000000000 --- a/spaces/osanchik/PicFinder/app.py +++ /dev/null @@ -1,84 +0,0 @@ -import streamlit as st - -from main import * -from setup import * - -from PIL import Image -import time - -def show_result(search_request, - search_result, - img_dir, - container, - search_time) : - - thumbnail_width = 300 - container.header("It took me "+ "{:.2f}".format(search_time)+ " sec to find \"" +search_request+ "\" for you !") - i = 0 - for _ in range(0, 3): - for col in container.columns(2): - if i >= len(search_result): - break - image_name, comment, score = search_result[i] - - # Загрузка изображения - image = Image.open(img_dir + image_name) - - # Выравнивание изображения по ширине - image_width, image_height = image.size - aspect_ratio = thumbnail_width / image_width - new_height = int(image_height * aspect_ratio) - resized_image = image.resize((thumbnail_width, new_height), Image.ANTIALIAS) - - # Добавление подписи - if score != '' : - sim_score = f"{float(100 * score):.2f}" - sim='similarity='+sim_score + "%" - col.markdown(comment) - col.markdown(f'

<div>{sim}</div>

                ', unsafe_allow_html=True) - else : - # Вывод изображения в контейнер - col.markdown(comment) - - col.image(resized_image, width=thumbnail_width) - i = i + 1 - - return - -def show_landing() : - - st.title('Find my pic!') - - search_request = st.text_input('Search for images', - 'Search ...') - - - col1, col2 = st.columns(2) - - if col1.button('Find!') and os.path.exists(IMAGE_DIR) : - results = st.container() - start_time = time.time() - search_result = search(search_request) - end_time = time.time() - show_result(search_request, - search_result, - IMAGE_DIR+'/', - results, - end_time - start_time) - - if col2.button('Find with faiss!') and os.path.exists(IMAGE_DIR) : - results = st.container() - start_time = time.time() - search_result = searchWithFaiss(search_request) - end_time = time.time() - show_result(search_request, - search_result, - IMAGE_DIR+'/', - results, - end_time - start_time) - return - - -downlad_images() - -show_landing() \ No newline at end of file diff --git a/spaces/patgpt4/MusicGen/CODE_OF_CONDUCT.md b/spaces/patgpt4/MusicGen/CODE_OF_CONDUCT.md deleted file mode 100644 index 83f431e8feeb7e80d571f39c9f6c1b96857b5f85..0000000000000000000000000000000000000000 --- a/spaces/patgpt4/MusicGen/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,80 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic -address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a -professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. 
-Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -This Code of Conduct also applies outside the project spaces when there is a -reasonable belief that an individual's behavior may have a negative impact on -the project or its community. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at . All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/spaces/perilli/tortoise-tts-v2/models/classifier.py b/spaces/perilli/tortoise-tts-v2/models/classifier.py deleted file mode 100644 index c8997738a2b689cb4bd744323339e5e8b46035ae..0000000000000000000000000000000000000000 --- a/spaces/perilli/tortoise-tts-v2/models/classifier.py +++ /dev/null @@ -1,158 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils.checkpoint import checkpoint - -from models.arch_util import Upsample, Downsample, normalization, zero_module, AttentionBlock - - -class ResBlock(nn.Module): - def __init__( - self, - channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - up=False, - down=False, - kernel_size=3, - do_checkpoint=True, - ): - super().__init__() - self.channels = channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_scale_shift_norm = use_scale_shift_norm - self.do_checkpoint = do_checkpoint - padding = 1 if kernel_size == 3 else 2 - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - nn.Conv1d(channels, self.out_channels, kernel_size, padding=padding), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - nn.Conv1d(self.out_channels, self.out_channels, kernel_size, padding=padding) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = nn.Conv1d( - channels, self.out_channels, kernel_size, padding=padding - ) - else: - self.skip_connection = nn.Conv1d(channels, self.out_channels, 1) - - def forward(self, x): - if self.do_checkpoint: - 
return checkpoint( - self._forward, x - ) - else: - return self._forward(x) - - def _forward(self, x): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AudioMiniEncoder(nn.Module): - def __init__(self, - spec_dim, - embedding_dim, - base_channels=128, - depth=2, - resnet_blocks=2, - attn_blocks=4, - num_attn_heads=4, - dropout=0, - downsample_factor=2, - kernel_size=3): - super().__init__() - self.init = nn.Sequential( - nn.Conv1d(spec_dim, base_channels, 3, padding=1) - ) - ch = base_channels - res = [] - self.layers = depth - for l in range(depth): - for r in range(resnet_blocks): - res.append(ResBlock(ch, dropout, do_checkpoint=False, kernel_size=kernel_size)) - res.append(Downsample(ch, use_conv=True, out_channels=ch*2, factor=downsample_factor)) - ch *= 2 - self.res = nn.Sequential(*res) - self.final = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.Conv1d(ch, embedding_dim, 1) - ) - attn = [] - for a in range(attn_blocks): - attn.append(AttentionBlock(embedding_dim, num_attn_heads, do_checkpoint=False)) - self.attn = nn.Sequential(*attn) - self.dim = embedding_dim - - def forward(self, x): - h = self.init(x) - h = self.res(h) - h = self.final(h) - for blk in self.attn: - h = checkpoint(blk, h) - return h[:, :, 0] - - -class AudioMiniEncoderWithClassifierHead(nn.Module): - def __init__(self, classes, distribute_zero_label=True, **kwargs): - super().__init__() - self.enc = AudioMiniEncoder(**kwargs) - self.head = nn.Linear(self.enc.dim, classes) - self.num_classes = classes - self.distribute_zero_label = distribute_zero_label - - def forward(self, x, labels=None): - h = self.enc(x) - logits = self.head(h) - if labels is None: - return logits - else: - if self.distribute_zero_label: - oh_labels = nn.functional.one_hot(labels, num_classes=self.num_classes) - zeros_indices = (labels == 0).unsqueeze(-1) - # Distribute 20% of the probability mass on all classes when zero is specified, to compensate for dataset noise. - zero_extra_mass = torch.full_like(oh_labels, dtype=torch.float, fill_value=.2/(self.num_classes-1)) - zero_extra_mass[:, 0] = -.2 - zero_extra_mass = zero_extra_mass * zeros_indices - oh_labels = oh_labels + zero_extra_mass - else: - oh_labels = labels - loss = nn.functional.cross_entropy(logits, oh_labels) - return loss diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/_meta.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/_meta.py deleted file mode 100644 index 259b15ba194db7c02fbcbf170d522230b4418933..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_metadata/_meta.py +++ /dev/null @@ -1,49 +0,0 @@ -from ._compat import Protocol -from typing import Any, Dict, Iterator, List, TypeVar, Union - - -_T = TypeVar("_T") - - -class PackageMetadata(Protocol): - def __len__(self) -> int: - ... # pragma: no cover - - def __contains__(self, item: str) -> bool: - ... # pragma: no cover - - def __getitem__(self, key: str) -> str: - ... # pragma: no cover - - def __iter__(self) -> Iterator[str]: - ... # pragma: no cover - - def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]: - """ - Return all values associated with a possibly multi-valued key. 
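The `distribute_zero_label` branch in `AudioMiniEncoderWithClassifierHead` (classifier.py above) is label smoothing applied only to class 0: when a sample carries the noisy "zero" label, 20% of its probability mass is spread over the remaining classes. A minimal sketch of that arithmetic in isolation, with illustrative tensor values:

    import torch

    num_classes = 4
    labels = torch.tensor([0, 2])      # first sample carries the noisy zero label
    oh = torch.nn.functional.one_hot(labels, num_classes=num_classes).float()
    is_zero = (labels == 0).unsqueeze(-1)
    extra = torch.full_like(oh, 0.2 / (num_classes - 1))   # spread over other classes
    extra[:, 0] = -0.2                                     # taken from class 0
    oh = oh + extra * is_zero
    # oh[0] -> [0.8000, 0.0667, 0.0667, 0.0667]; oh[1] stays a hard one-hot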
- """ - - @property - def json(self) -> Dict[str, Union[str, List[str]]]: - """ - A JSON-compatible form of the metadata. - """ - - -class SimplePath(Protocol[_T]): - """ - A minimal subset of pathlib.Path required by PathDistribution. - """ - - def joinpath(self) -> _T: - ... # pragma: no cover - - def __truediv__(self, other: Union[str, _T]) -> _T: - ... # pragma: no cover - - @property - def parent(self) -> _T: - ... # pragma: no cover - - def read_text(self) -> str: - ... # pragma: no cover diff --git a/spaces/poojasree2003/aiproject/README.md b/spaces/poojasree2003/aiproject/README.md deleted file mode 100644 index 5f99967042c0d4ef1bd3391e880beced5a2c853a..0000000000000000000000000000000000000000 --- a/spaces/poojasree2003/aiproject/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Aiproject -emoji: 📉 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/prerna9811/Chord/portaudio/src/common/pa_types.h b/spaces/prerna9811/Chord/portaudio/src/common/pa_types.h deleted file mode 100644 index f628783adf8333122fbdf08b3572e1959e49b78f..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/src/common/pa_types.h +++ /dev/null @@ -1,107 +0,0 @@ -#ifndef PA_TYPES_H -#define PA_TYPES_H - -/* - * Portable Audio I/O Library - * integer type definitions - * - * Based on the Open Source API proposed by Ross Bencina - * Copyright (c) 1999-2006 Ross Bencina, Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** @file - @ingroup common_src - - @brief Definition of 16 and 32 bit integer types (PaInt16, PaInt32 etc) - - SIZEOF_SHORT, SIZEOF_INT and SIZEOF_LONG are set by the configure script - when it is used. 
Otherwise we default to the common 32 bit values, if your - platform doesn't use configure, and doesn't use the default values below - you will need to explicitly define these symbols in your make file. - - A PA_VALIDATE_TYPE_SIZES macro is provided to assert that the values set in this - file are correct. -*/ - -#ifndef SIZEOF_SHORT -#define SIZEOF_SHORT 2 -#endif - -#ifndef SIZEOF_INT -#define SIZEOF_INT 4 -#endif - -#ifndef SIZEOF_LONG -#define SIZEOF_LONG 4 -#endif - - -#if SIZEOF_SHORT == 2 -typedef signed short PaInt16; -typedef unsigned short PaUint16; -#elif SIZEOF_INT == 2 -typedef signed int PaInt16; -typedef unsigned int PaUint16; -#else -#error pa_types.h was unable to determine which type to use for 16bit integers on the target platform -#endif - -#if SIZEOF_SHORT == 4 -typedef signed short PaInt32; -typedef unsigned short PaUint32; -#elif SIZEOF_INT == 4 -typedef signed int PaInt32; -typedef unsigned int PaUint32; -#elif SIZEOF_LONG == 4 -typedef signed long PaInt32; -typedef unsigned long PaUint32; -#else -#error pa_types.h was unable to determine which type to use for 32bit integers on the target platform -#endif - - -/* PA_VALIDATE_TYPE_SIZES compares the size of the integer types at runtime to - ensure that PortAudio was configured correctly, and raises an assertion if - they don't match the expected values. <assert.h> must be included in the - context in which this macro is used. -*/ -#define PA_VALIDATE_TYPE_SIZES \ - { \ - assert( "PortAudio: type sizes are not correct in pa_types.h" && sizeof( PaUint16 ) == 2 ); \ - assert( "PortAudio: type sizes are not correct in pa_types.h" && sizeof( PaInt16 ) == 2 ); \ - assert( "PortAudio: type sizes are not correct in pa_types.h" && sizeof( PaUint32 ) == 4 ); \ - assert( "PortAudio: type sizes are not correct in pa_types.h" && sizeof( PaInt32 ) == 4 ); \ - } - - -#endif /* PA_TYPES_H */ diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/openapi/docs.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/openapi/docs.py deleted file mode 100644 index 69473d19cb2569cfbec9dae86328a675947a465c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/openapi/docs.py +++ /dev/null @@ -1,344 +0,0 @@ -import json -from typing import Any, Dict, Optional - -from fastapi.encoders import jsonable_encoder -from starlette.responses import HTMLResponse -from typing_extensions import Annotated, Doc # type: ignore [attr-defined] - -swagger_ui_default_parameters: Annotated[ - Dict[str, Any], - Doc( - """ - Default configurations for Swagger UI. - - You can use it as a template to add any other configurations needed. - """ - ), -] = { - "dom_id": "#swagger-ui", - "layout": "BaseLayout", - "deepLinking": True, - "showExtensions": True, - "showCommonExtensions": True, -} - - -def get_swagger_ui_html( - *, - openapi_url: Annotated[ - str, - Doc( - """ - The OpenAPI URL that Swagger UI should load and use. - - This is normally done automatically by FastAPI using the default URL - `/openapi.json`. - """ - ), - ], - title: Annotated[ - str, - Doc( - """ - The HTML `<title>` content, normally shown in the browser tab. - """ - ), - ], - swagger_js_url: Annotated[ - str, - Doc( - """ - The URL to use to load the Swagger UI JavaScript. - - It is normally set to a CDN URL. - """ - ), - ] = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5.9.0/swagger-ui-bundle.js", - swagger_css_url: Annotated[ - str, - Doc( - """ - The URL to use to load the Swagger UI CSS. 
- - It is normally set to a CDN URL. - """ - ), - ] = "https://cdn.jsdelivr.net/npm/swagger-ui-dist@5.9.0/swagger-ui.css", - swagger_favicon_url: Annotated[ - str, - Doc( - """ - The URL of the favicon to use. It is normally shown in the browser tab. - """ - ), - ] = "https://fastapi.tiangolo.com/img/favicon.png", - oauth2_redirect_url: Annotated[ - Optional[str], - Doc( - """ - The OAuth2 redirect URL, it is normally automatically handled by FastAPI. - """ - ), - ] = None, - init_oauth: Annotated[ - Optional[Dict[str, Any]], - Doc( - """ - A dictionary with Swagger UI OAuth2 initialization configurations. - """ - ), - ] = None, - swagger_ui_parameters: Annotated[ - Optional[Dict[str, Any]], - Doc( - """ - Configuration parameters for Swagger UI. - - It defaults to [swagger_ui_default_parameters][fastapi.openapi.docs.swagger_ui_default_parameters]. - """ - ), - ] = None, -) -> HTMLResponse: - """ - Generate and return the HTML that loads Swagger UI for the interactive - API docs (normally served at `/docs`). - - You would only call this function yourself if you needed to override some parts, - for example the URLs to use to load Swagger UI's JavaScript and CSS. - - Read more about it in the - [FastAPI docs for Configure Swagger UI](https://fastapi.tiangolo.com/how-to/configure-swagger-ui/) - and the [FastAPI docs for Custom Docs UI Static Assets (Self-Hosting)](https://fastapi.tiangolo.com/how-to/custom-docs-ui-assets/). - """ - current_swagger_ui_parameters = swagger_ui_default_parameters.copy() - if swagger_ui_parameters: - current_swagger_ui_parameters.update(swagger_ui_parameters) - - html = f""" - <!DOCTYPE html> - <html> - <head> - <link type="text/css" rel="stylesheet" href="{swagger_css_url}"> - <link rel="shortcut icon" href="{swagger_favicon_url}"> - <title>{title} - - -
                -
                - - - - - - """ - return HTMLResponse(html) - - -def get_redoc_html( - *, - openapi_url: Annotated[ - str, - Doc( - """ - The OpenAPI URL that ReDoc should load and use. - - This is normally done automatically by FastAPI using the default URL - `/openapi.json`. - """ - ), - ], - title: Annotated[ - str, - Doc( - """ - The HTML `` content, normally shown in the browser tab. - """ - ), - ], - redoc_js_url: Annotated[ - str, - Doc( - """ - The URL to use to load the ReDoc JavaScript. - - It is normally set to a CDN URL. - """ - ), - ] = "https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js", - redoc_favicon_url: Annotated[ - str, - Doc( - """ - The URL of the favicon to use. It is normally shown in the browser tab. - """ - ), - ] = "https://fastapi.tiangolo.com/img/favicon.png", - with_google_fonts: Annotated[ - bool, - Doc( - """ - Load and use Google Fonts. - """ - ), - ] = True, -) -> HTMLResponse: - """ - Generate and return the HTML response that loads ReDoc for the alternative - API docs (normally served at `/redoc`). - - You would only call this function yourself if you needed to override some parts, - for example the URLs to use to load ReDoc's JavaScript and CSS. - - Read more about it in the - [FastAPI docs for Custom Docs UI Static Assets (Self-Hosting)](https://fastapi.tiangolo.com/how-to/custom-docs-ui-assets/). - """ - html = f""" - <!DOCTYPE html> - <html> - <head> - <title>{title} - - - - """ - if with_google_fonts: - html += """ - - """ - html += f""" - - - - - - - - - - - """ - return HTMLResponse(html) - - -def get_swagger_ui_oauth2_redirect_html() -> HTMLResponse: - """ - Generate the HTML response with the OAuth2 redirection for Swagger UI. - - You normally don't need to use or change this. - """ - # copied from https://github.com/swagger-api/swagger-ui/blob/v4.14.0/dist/oauth2-redirect.html - html = """ - - - - Swagger UI: OAuth2 Redirect - - - - - - """ - return HTMLResponse(content=html) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py deleted file mode 100644 index a45230e8dbd8399fdd2a5d292bf71fe96c271b78..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py +++ /dev/null @@ -1,78 +0,0 @@ -from fontTools.ttLib import TTFont -from fontTools.feaLib.builder import addOpenTypeFeatures, Builder -from fontTools.feaLib.error import FeatureLibError -from fontTools import configLogger -from fontTools.misc.cliTools import makeOutputFileName -import sys -import argparse -import logging - - -log = logging.getLogger("fontTools.feaLib") - - -def main(args=None): - """Add features from a feature file (.fea) into an OTF font""" - parser = argparse.ArgumentParser( - description="Use fontTools to compile OpenType feature files (*.fea)." 
- ) - parser.add_argument( - "input_fea", metavar="FEATURES", help="Path to the feature file" - ) - parser.add_argument( - "input_font", metavar="INPUT_FONT", help="Path to the input font" - ) - parser.add_argument( - "-o", - "--output", - dest="output_font", - metavar="OUTPUT_FONT", - help="Path to the output font.", - ) - parser.add_argument( - "-t", - "--tables", - metavar="TABLE_TAG", - choices=Builder.supportedTables, - nargs="+", - help="Specify the table(s) to be built.", - ) - parser.add_argument( - "-d", - "--debug", - action="store_true", - help="Add source-level debugging information to font.", - ) - parser.add_argument( - "-v", - "--verbose", - help="Increase the logger verbosity. Multiple -v " "options are allowed.", - action="count", - default=0, - ) - parser.add_argument( - "--traceback", help="show traceback for exceptions.", action="store_true" - ) - options = parser.parse_args(args) - - levels = ["WARNING", "INFO", "DEBUG"] - configLogger(level=levels[min(len(levels) - 1, options.verbose)]) - - output_font = options.output_font or makeOutputFileName(options.input_font) - log.info("Compiling features to '%s'" % (output_font)) - - font = TTFont(options.input_font) - try: - addOpenTypeFeatures( - font, options.input_fea, tables=options.tables, debug=options.debug - ) - except FeatureLibError as e: - if options.traceback: - raise - log.error(e) - sys.exit(1) - font.save(output_font) - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/testing/jpl_units/Duration.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/testing/jpl_units/Duration.py deleted file mode 100644 index 052c5a47c0fd8313795acfcfc2e1aaf851d05c60..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/testing/jpl_units/Duration.py +++ /dev/null @@ -1,138 +0,0 @@ -"""Duration module.""" - -import functools -import operator - -from matplotlib import _api - - -class Duration: - """Class Duration in development.""" - - allowed = ["ET", "UTC"] - - def __init__(self, frame, seconds): - """ - Create a new Duration object. - - = ERROR CONDITIONS - - If the input frame is not in the allowed list, an error is thrown. - - = INPUT VARIABLES - - frame The frame of the duration. Must be 'ET' or 'UTC' - - seconds The number of seconds in the Duration. - """ - _api.check_in_list(self.allowed, frame=frame) - self._frame = frame - self._seconds = seconds - - def frame(self): - """Return the frame the duration is in.""" - return self._frame - - def __abs__(self): - """Return the absolute value of the duration.""" - return Duration(self._frame, abs(self._seconds)) - - def __neg__(self): - """Return the negative value of this Duration.""" - return Duration(self._frame, -self._seconds) - - def seconds(self): - """Return the number of seconds in the Duration.""" - return self._seconds - - def __bool__(self): - return self._seconds != 0 - - def _cmp(self, op, rhs): - """ - Check that *self* and *rhs* share frames; compare them using *op*. 
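Because the feaLib entry point above (`fontTools/feaLib/__main__.py`) takes `main(args=None)`, the feature compiler can be driven programmatically as well as from the shell. A sketch with illustrative file names:

    from fontTools.feaLib.__main__ import main

    # Same as: python -m fontTools.feaLib -o Font-fea.ttf features.fea Font.ttf
    main(["features.fea", "Font.ttf", "-o", "Font-fea.ttf"])

    # Build only GSUB/GPOS with extra logging:
    # python -m fontTools.feaLib -t GSUB GPOS -v features.fea Font.ttf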
- """ - self.checkSameFrame(rhs, "compare") - return op(self._seconds, rhs._seconds) - - __eq__ = functools.partialmethod(_cmp, operator.eq) - __ne__ = functools.partialmethod(_cmp, operator.ne) - __lt__ = functools.partialmethod(_cmp, operator.lt) - __le__ = functools.partialmethod(_cmp, operator.le) - __gt__ = functools.partialmethod(_cmp, operator.gt) - __ge__ = functools.partialmethod(_cmp, operator.ge) - - def __add__(self, rhs): - """ - Add two Durations. - - = ERROR CONDITIONS - - If the input rhs is not in the same frame, an error is thrown. - - = INPUT VARIABLES - - rhs The Duration to add. - - = RETURN VALUE - - Returns the sum of ourselves and the input Duration. - """ - # Delay-load due to circular dependencies. - import matplotlib.testing.jpl_units as U - - if isinstance(rhs, U.Epoch): - return rhs + self - - self.checkSameFrame(rhs, "add") - return Duration(self._frame, self._seconds + rhs._seconds) - - def __sub__(self, rhs): - """ - Subtract two Durations. - - = ERROR CONDITIONS - - If the input rhs is not in the same frame, an error is thrown. - - = INPUT VARIABLES - - rhs The Duration to subtract. - - = RETURN VALUE - - Returns the difference of ourselves and the input Duration. - """ - self.checkSameFrame(rhs, "sub") - return Duration(self._frame, self._seconds - rhs._seconds) - - def __mul__(self, rhs): - """ - Scale a UnitDbl by a value. - - = INPUT VARIABLES - - rhs The scalar to multiply by. - - = RETURN VALUE - - Returns the scaled Duration. - """ - return Duration(self._frame, self._seconds * float(rhs)) - - __rmul__ = __mul__ - - def __str__(self): - """Print the Duration.""" - return f"{self._seconds:g} {self._frame}" - - def __repr__(self): - """Print the Duration.""" - return f"Duration('{self._frame}', {self._seconds:g})" - - def checkSameFrame(self, rhs, func): - """ - Check to see if frames are the same. - - = ERROR CONDITIONS - - If the frame of the rhs Duration is not the same as our frame, - an error is thrown. - - = INPUT VARIABLES - - rhs The Duration to check for the same frame - - func The name of the function doing the check. 
- """ - if self._frame != rhs._frame: - raise ValueError( - f"Cannot {func} Durations with different frames.\n" - f"LHS: {self._frame}\n" - f"RHS: {rhs._frame}") diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_sankey.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_sankey.py deleted file mode 100644 index cbb7f516a65c4a9bb745365ade4c0cdfe87fd43b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_sankey.py +++ /dev/null @@ -1,105 +0,0 @@ -import pytest -from numpy.testing import assert_allclose, assert_array_equal - -from matplotlib.sankey import Sankey -from matplotlib.testing.decorators import check_figures_equal - - -def test_sankey(): - # lets just create a sankey instance and check the code runs - sankey = Sankey() - sankey.add() - - -def test_label(): - s = Sankey(flows=[0.25], labels=['First'], orientations=[-1]) - assert s.diagrams[0].texts[0].get_text() == 'First\n0.25' - - -def test_format_using_callable(): - # test using callable by slightly incrementing above label example - - def show_three_decimal_places(value): - return f'{value:.3f}' - - s = Sankey(flows=[0.25], labels=['First'], orientations=[-1], - format=show_three_decimal_places) - - assert s.diagrams[0].texts[0].get_text() == 'First\n0.250' - - -@pytest.mark.parametrize('kwargs, msg', ( - ({'gap': -1}, "'gap' is negative"), - ({'gap': 1, 'radius': 2}, "'radius' is greater than 'gap'"), - ({'head_angle': -1}, "'head_angle' is negative"), - ({'tolerance': -1}, "'tolerance' is negative"), - ({'flows': [1, -1], 'orientations': [-1, 0, 1]}, - r"The shapes of 'flows' \(2,\) and 'orientations'"), - ({'flows': [1, -1], 'labels': ['a', 'b', 'c']}, - r"The shapes of 'flows' \(2,\) and 'labels'"), - )) -def test_sankey_errors(kwargs, msg): - with pytest.raises(ValueError, match=msg): - Sankey(**kwargs) - - -@pytest.mark.parametrize('kwargs, msg', ( - ({'trunklength': -1}, "'trunklength' is negative"), - ({'flows': [0.2, 0.3], 'prior': 0}, "The scaled sum of the connected"), - ({'prior': -1}, "The index of the prior diagram is negative"), - ({'prior': 1}, "The index of the prior diagram is 1"), - ({'connect': (-1, 1), 'prior': 0}, "At least one of the connection"), - ({'connect': (2, 1), 'prior': 0}, "The connection index to the source"), - ({'connect': (1, 3), 'prior': 0}, "The connection index to this dia"), - ({'connect': (1, 1), 'prior': 0, 'flows': [-0.2, 0.2], - 'orientations': [2]}, "The value of orientations"), - ({'connect': (1, 1), 'prior': 0, 'flows': [-0.2, 0.2], - 'pathlengths': [2]}, "The lengths of 'flows'"), - )) -def test_sankey_add_errors(kwargs, msg): - sankey = Sankey() - with pytest.raises(ValueError, match=msg): - sankey.add(flows=[0.2, -0.2]) - sankey.add(**kwargs) - - -def test_sankey2(): - s = Sankey(flows=[0.25, -0.25, 0.5, -0.5], labels=['Foo'], - orientations=[-1], unit='Bar') - sf = s.finish() - assert_array_equal(sf[0].flows, [0.25, -0.25, 0.5, -0.5]) - assert sf[0].angles == [1, 3, 1, 3] - assert all([text.get_text()[0:3] == 'Foo' for text in sf[0].texts]) - assert all([text.get_text()[-3:] == 'Bar' for text in sf[0].texts]) - assert sf[0].text.get_text() == '' - assert_allclose(sf[0].tips, - [(-1.375, -0.52011255), - (1.375, -0.75506044), - (-0.75, -0.41522509), - (0.75, -0.8599479)]) - - s = Sankey(flows=[0.25, -0.25, 0, 0.5, -0.5], labels=['Foo'], - orientations=[-1], unit='Bar') - sf = s.finish() - 
assert_array_equal(sf[0].flows, [0.25, -0.25, 0, 0.5, -0.5]) - assert sf[0].angles == [1, 3, None, 1, 3] - assert_allclose(sf[0].tips, - [(-1.375, -0.52011255), - (1.375, -0.75506044), - (0, 0), - (-0.75, -0.41522509), - (0.75, -0.8599479)]) - - -@check_figures_equal(extensions=['png']) -def test_sankey3(fig_test, fig_ref): - ax_test = fig_test.gca() - s_test = Sankey(ax=ax_test, flows=[0.25, -0.25, -0.25, 0.25, 0.5, -0.5], - orientations=[1, -1, 1, -1, 0, 0]) - s_test.finish() - - ax_ref = fig_ref.gca() - s_ref = Sankey(ax=ax_ref) - s_ref.add(flows=[0.25, -0.25, -0.25, 0.25, 0.5, -0.5], - orientations=[1, -1, 1, -1, 0, 0]) - s_ref.finish() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py deleted file mode 100644 index 3734344d2a85c23a762edbd104a4dac20806c5a0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/tests/test_polynomial.py +++ /dev/null @@ -1,303 +0,0 @@ -import numpy as np -from numpy.testing import ( - assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose - ) - -import pytest - -# `poly1d` has some support for `bool_` and `timedelta64`, -# but it is limited and they are therefore excluded here -TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O" - - -class TestPolynomial: - def test_poly1d_str_and_repr(self): - p = np.poly1d([1., 2, 3]) - assert_equal(repr(p), 'poly1d([1., 2., 3.])') - assert_equal(str(p), - ' 2\n' - '1 x + 2 x + 3') - - q = np.poly1d([3., 2, 1]) - assert_equal(repr(q), 'poly1d([3., 2., 1.])') - assert_equal(str(q), - ' 2\n' - '3 x + 2 x + 1') - - r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) - assert_equal(str(r), - ' 3 2\n' - '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') - - assert_equal(str(np.poly1d([-3, -2, -1])), - ' 2\n' - '-3 x - 2 x - 1') - - def test_poly1d_resolution(self): - p = np.poly1d([1., 2, 3]) - q = np.poly1d([3., 2, 1]) - assert_equal(p(0), 3.0) - assert_equal(p(5), 38.0) - assert_equal(q(0), 1.0) - assert_equal(q(5), 86.0) - - def test_poly1d_math(self): - # here we use some simple coeffs to make calculations easier - p = np.poly1d([1., 2, 4]) - q = np.poly1d([4., 2, 1]) - assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) - assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.])) - assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.])) - - p = np.poly1d([1., 2, 3]) - q = np.poly1d([3., 2, 1]) - assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) - assert_equal(p + q, np.poly1d([4., 4., 4.])) - assert_equal(p - q, np.poly1d([-2., 0., 2.])) - assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) - assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) - assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) - assert_equal(p.deriv(), np.poly1d([2., 2.])) - assert_equal(p.deriv(2), np.poly1d([2.])) - assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), - (np.poly1d([1., -1.]), np.poly1d([0.]))) - - @pytest.mark.parametrize("type_code", TYPE_CODES) - def test_poly1d_misc(self, type_code: str) -> None: - dtype = np.dtype(type_code) - ar = np.array([1, 2, 3], dtype=dtype) - p = np.poly1d(ar) - - # `__eq__` - assert_equal(np.asarray(p), ar) - assert_equal(np.asarray(p).dtype, dtype) - assert_equal(len(p), 2) - - # `__getitem__` - comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 
1, 3: 0} - for index, ref in comparison_dct.items(): - scalar = p[index] - assert_equal(scalar, ref) - if dtype == np.object_: - assert isinstance(scalar, int) - else: - assert_equal(scalar.dtype, dtype) - - def test_poly1d_variable_arg(self): - q = np.poly1d([1., 2, 3], variable='y') - assert_equal(str(q), - ' 2\n' - '1 y + 2 y + 3') - q = np.poly1d([1., 2, 3], variable='lambda') - assert_equal(str(q), - ' 2\n' - '1 lambda + 2 lambda + 3') - - def test_poly(self): - assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), - [1, -3, -2, 6]) - - # From matlab docs - A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] - assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) - - # Should produce real output for perfect conjugates - assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) - assert_(np.isrealobj(np.poly([0+1j, -0+-1j, 1+2j, - 1-2j, 1.+3.5j, 1-3.5j]))) - assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j, 1+3j, 1-3.j]))) - assert_(np.isrealobj(np.poly([1j, -1j, 1+2j, 1-2j]))) - assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j]))) - assert_(np.isrealobj(np.poly([1j, -1j]))) - assert_(np.isrealobj(np.poly([1, -1]))) - - assert_(np.iscomplexobj(np.poly([1j, -1.0000001j]))) - - np.random.seed(42) - a = np.random.randn(100) + 1j*np.random.randn(100) - assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a)))))) - - def test_roots(self): - assert_array_equal(np.roots([1, 0, 0]), [0, 0]) - - def test_str_leading_zeros(self): - p = np.poly1d([4, 3, 2, 1]) - p[3] = 0 - assert_equal(str(p), - " 2\n" - "3 x + 2 x + 1") - - p = np.poly1d([1, 2]) - p[0] = 0 - p[1] = 0 - assert_equal(str(p), " \n0") - - def test_polyfit(self): - c = np.array([3., 2., 1.]) - x = np.linspace(0, 2, 7) - y = np.polyval(c, x) - err = [1, -1, 1, -1, 1, -1, 1] - weights = np.arange(8, 1, -1)**2/7.0 - - # Check exception when too few points for variance estimate. 
Note that - # the estimate requires the number of data points to exceed - # degree + 1 - assert_raises(ValueError, np.polyfit, - [1], [1], deg=0, cov=True) - - # check 1D case - m, cov = np.polyfit(x, y+err, 2, cov=True) - est = [3.8571, 0.2857, 1.619] - assert_almost_equal(est, m, decimal=4) - val0 = [[ 1.4694, -2.9388, 0.8163], - [-2.9388, 6.3673, -2.1224], - [ 0.8163, -2.1224, 1.161 ]] - assert_almost_equal(val0, cov, decimal=4) - - m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True) - assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4) - val = [[ 4.3964, -5.0052, 0.4878], - [-5.0052, 6.8067, -0.9089], - [ 0.4878, -0.9089, 0.3337]] - assert_almost_equal(val, cov2, decimal=4) - - m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled") - assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4) - val = [[ 0.1473, -0.1677, 0.0163], - [-0.1677, 0.228 , -0.0304], - [ 0.0163, -0.0304, 0.0112]] - assert_almost_equal(val, cov3, decimal=4) - - # check 2D (n,1) case - y = y[:, np.newaxis] - c = c[:, np.newaxis] - assert_almost_equal(c, np.polyfit(x, y, 2)) - # check 2D (n,2) case - yy = np.concatenate((y, y), axis=1) - cc = np.concatenate((c, c), axis=1) - assert_almost_equal(cc, np.polyfit(x, yy, 2)) - - m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True) - assert_almost_equal(est, m[:, 0], decimal=4) - assert_almost_equal(est, m[:, 1], decimal=4) - assert_almost_equal(val0, cov[:, :, 0], decimal=4) - assert_almost_equal(val0, cov[:, :, 1], decimal=4) - - # check order 1 (deg=0) case, were the analytic results are simple - np.random.seed(123) - y = np.random.normal(size=(4, 10000)) - mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True) - # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5. - assert_allclose(mean.std(), 0.5, atol=0.01) - assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) - # Without scaling, since reduced chi2 is 1, the result should be the same. - mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]), - deg=0, cov="unscaled") - assert_allclose(mean.std(), 0.5, atol=0.01) - assert_almost_equal(np.sqrt(cov.mean()), 0.5) - # If we estimate our errors wrong, no change with scaling: - w = np.full(y.shape[0], 1./0.5) - mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) - assert_allclose(mean.std(), 0.5, atol=0.01) - assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) - # But if we do not scale, our estimate for the error in the mean will - # differ. 
- mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") - assert_allclose(mean.std(), 0.5, atol=0.01) - assert_almost_equal(np.sqrt(cov.mean()), 0.25) - - def test_objects(self): - from decimal import Decimal - p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) - p2 = p * Decimal('1.333333333333333') - assert_(p2[1] == Decimal("3.9999999999999990")) - p2 = p.deriv() - assert_(p2[1] == Decimal('8.0')) - p2 = p.integ() - assert_(p2[3] == Decimal("1.333333333333333333333333333")) - assert_(p2[2] == Decimal('1.5')) - assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) - p = np.poly([Decimal(1), Decimal(2)]) - assert_equal(np.poly([Decimal(1), Decimal(2)]), - [1, Decimal(-3), Decimal(2)]) - - def test_complex(self): - p = np.poly1d([3j, 2j, 1j]) - p2 = p.integ() - assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) - p2 = p.deriv() - assert_((p2.coeffs == [6j, 2j]).all()) - - def test_integ_coeffs(self): - p = np.poly1d([3, 2, 1]) - p2 = p.integ(3, k=[9, 7, 6]) - assert_( - (p2.coeffs == [1/4./5., 1/3./4., 1/2./3., 9/1./2., 7, 6]).all()) - - def test_zero_dims(self): - try: - np.poly(np.zeros((0, 0))) - except ValueError: - pass - - def test_poly_int_overflow(self): - """ - Regression test for gh-5096. - """ - v = np.arange(1, 21) - assert_almost_equal(np.poly(v), np.poly(np.diag(v))) - - def test_zero_poly_dtype(self): - """ - Regression test for gh-16354. - """ - z = np.array([0, 0, 0]) - p = np.poly1d(z.astype(np.int64)) - assert_equal(p.coeffs.dtype, np.int64) - - p = np.poly1d(z.astype(np.float32)) - assert_equal(p.coeffs.dtype, np.float32) - - p = np.poly1d(z.astype(np.complex64)) - assert_equal(p.coeffs.dtype, np.complex64) - - def test_poly_eq(self): - p = np.poly1d([1, 2, 3]) - p2 = np.poly1d([1, 2, 4]) - assert_equal(p == None, False) - assert_equal(p != None, True) - assert_equal(p == p, True) - assert_equal(p == p2, False) - assert_equal(p != p2, True) - - def test_polydiv(self): - b = np.poly1d([2, 6, 6, 1]) - a = np.poly1d([-1j, (1+2j), -(2+1j), 1]) - q, r = np.polydiv(b, a) - assert_equal(q.coeffs.dtype, np.complex128) - assert_equal(r.coeffs.dtype, np.complex128) - assert_equal(q*a + r, b) - - c = [1, 2, 3] - d = np.poly1d([1, 2, 3]) - s, t = np.polydiv(c, d) - assert isinstance(s, np.poly1d) - assert isinstance(t, np.poly1d) - u, v = np.polydiv(d, c) - assert isinstance(u, np.poly1d) - assert isinstance(v, np.poly1d) - - def test_poly_coeffs_mutable(self): - """ Coefficients should be modifiable """ - p = np.poly1d([1, 2, 3]) - - p.coeffs += 1 - assert_equal(p.coeffs, [2, 3, 4]) - - p.coeffs[2] += 10 - assert_equal(p.coeffs, [2, 3, 14]) - - # this never used to be allowed - let's not add features to deprecated - # APIs - assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/copy_view/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/copy_view/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_period.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_period.py deleted file mode 100644 index 6a3e6b8c0e0596cfad38bfd1e02fd1b0f34e4ddb..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_period.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import pytest - -from pandas import ( - DataFrame, - DatetimeIndex, - PeriodIndex, - Series, - date_range, - period_range, -) -import pandas._testing as tm - - -class TestToPeriod: - def test_to_period(self, frame_or_series): - K = 5 - - dr = date_range("1/1/2000", "1/1/2001", freq="D") - obj = DataFrame( - np.random.default_rng(2).standard_normal((len(dr), K)), - index=dr, - columns=["A", "B", "C", "D", "E"], - ) - obj["mix"] = "a" - obj = tm.get_obj(obj, frame_or_series) - - pts = obj.to_period() - exp = obj.copy() - exp.index = period_range("1/1/2000", "1/1/2001") - tm.assert_equal(pts, exp) - - pts = obj.to_period("M") - exp.index = exp.index.asfreq("M") - tm.assert_equal(pts, exp) - - def test_to_period_without_freq(self, frame_or_series): - # GH#7606 without freq - idx = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"]) - exp_idx = PeriodIndex( - ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], freq="D" - ) - - obj = DataFrame( - np.random.default_rng(2).standard_normal((4, 4)), index=idx, columns=idx - ) - obj = tm.get_obj(obj, frame_or_series) - expected = obj.copy() - expected.index = exp_idx - tm.assert_equal(obj.to_period(), expected) - - if frame_or_series is DataFrame: - expected = obj.copy() - expected.columns = exp_idx - tm.assert_frame_equal(obj.to_period(axis=1), expected) - - def test_to_period_columns(self): - dr = date_range("1/1/2000", "1/1/2001") - df = DataFrame(np.random.default_rng(2).standard_normal((len(dr), 5)), index=dr) - df["mix"] = "a" - - df = df.T - pts = df.to_period(axis=1) - exp = df.copy() - exp.columns = period_range("1/1/2000", "1/1/2001") - tm.assert_frame_equal(pts, exp) - - pts = df.to_period("M", axis=1) - tm.assert_index_equal(pts.columns, exp.columns.asfreq("M")) - - def test_to_period_invalid_axis(self): - dr = date_range("1/1/2000", "1/1/2001") - df = DataFrame(np.random.default_rng(2).standard_normal((len(dr), 5)), index=dr) - df["mix"] = "a" - - msg = "No axis named 2 for object type DataFrame" - with pytest.raises(ValueError, match=msg): - df.to_period(axis=2) - - def test_to_period_raises(self, index, frame_or_series): - # https://github.com/pandas-dev/pandas/issues/33327 - obj = Series(index=index, dtype=object) - if frame_or_series is DataFrame: - obj = obj.to_frame() - - if not isinstance(index, DatetimeIndex): - msg = f"unsupported Type {type(index).__name__}" - with pytest.raises(TypeError, match=msg): - obj.to_period() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/generic/test_frame.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/generic/test_frame.py deleted file mode 100644 index 620d5055f5d3b56408f30dae3d3c83cae9af48a8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/generic/test_frame.py +++ /dev/null @@ -1,209 +0,0 @@ -from copy import deepcopy -from operator import methodcaller - -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - MultiIndex, - Series, - date_range, -) -import pandas._testing as tm - - -class TestDataFrame: - @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) - def test_set_axis_name(self, func): - df = DataFrame([[1, 2], [3, 4]]) - - result = methodcaller(func, "foo")(df) - assert df.index.name is None - assert 
result.index.name == "foo" - - result = methodcaller(func, "cols", axis=1)(df) - assert df.columns.name is None - assert result.columns.name == "cols" - - @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) - def test_set_axis_name_mi(self, func): - df = DataFrame( - np.empty((3, 3)), - index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]), - columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]), - ) - - level_names = ["L1", "L2"] - - result = methodcaller(func, level_names)(df) - assert result.index.names == level_names - assert result.columns.names == [None, None] - - result = methodcaller(func, level_names, axis=1)(df) - assert result.columns.names == ["L1", "L2"] - assert result.index.names == [None, None] - - def test_nonzero_single_element(self): - # allow single item via bool method - msg_warn = ( - "DataFrame.bool is now deprecated and will be removed " - "in future version of pandas" - ) - df = DataFrame([[True]]) - df1 = DataFrame([[False]]) - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - assert df.bool() - - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - assert not df1.bool() - - df = DataFrame([[False, False]]) - msg_err = "The truth value of a DataFrame is ambiguous" - with pytest.raises(ValueError, match=msg_err): - bool(df) - - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - with pytest.raises(ValueError, match=msg_err): - df.bool() - - def test_metadata_propagation_indiv_groupby(self): - # groupby - df = DataFrame( - { - "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], - "B": ["one", "one", "two", "three", "two", "two", "one", "three"], - "C": np.random.default_rng(2).standard_normal(8), - "D": np.random.default_rng(2).standard_normal(8), - } - ) - result = df.groupby("A").sum() - tm.assert_metadata_equivalent(df, result) - - def test_metadata_propagation_indiv_resample(self): - # resample - df = DataFrame( - np.random.default_rng(2).standard_normal((1000, 2)), - index=date_range("20130101", periods=1000, freq="s"), - ) - result = df.resample("1T") - tm.assert_metadata_equivalent(df, result) - - def test_metadata_propagation_indiv(self, monkeypatch): - # merging with override - # GH 6923 - - def finalize(self, other, method=None, **kwargs): - for name in self._metadata: - if method == "merge": - left, right = other.left, other.right - value = getattr(left, name, "") + "|" + getattr(right, name, "") - object.__setattr__(self, name, value) - elif method == "concat": - value = "+".join( - [getattr(o, name) for o in other.objs if getattr(o, name, None)] - ) - object.__setattr__(self, name, value) - else: - object.__setattr__(self, name, getattr(other, name, "")) - - return self - - with monkeypatch.context() as m: - m.setattr(DataFrame, "_metadata", ["filename"]) - m.setattr(DataFrame, "__finalize__", finalize) - - df1 = DataFrame( - np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"] - ) - df2 = DataFrame( - np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"] - ) - DataFrame._metadata = ["filename"] - df1.filename = "fname1.csv" - df2.filename = "fname2.csv" - - result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner") - assert result.filename == "fname1.csv|fname2.csv" - - # concat - # GH#6927 - df1 = DataFrame( - np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab") - ) - df1.filename = "foo" - - result = pd.concat([df1, df1]) - assert result.filename == "foo+foo" - - def test_set_attribute(self): - # Test for 
consistent setattr behavior when an attribute and a column - # have the same name (Issue #8994) - df = DataFrame({"x": [1, 2, 3]}) - - df.y = 2 - df["y"] = [2, 4, 6] - df.y = 5 - - assert df.y == 5 - tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y")) - - def test_deepcopy_empty(self): - # This test covers empty frame copying with non-empty column sets - # as reported in issue GH15370 - empty_frame = DataFrame(data=[], index=[], columns=["A"]) - empty_frame_copy = deepcopy(empty_frame) - - tm.assert_frame_equal(empty_frame_copy, empty_frame) - - -# formerly in Generic but only test DataFrame -class TestDataFrame2: - @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) - def test_validate_bool_args(self, value): - df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) - - msg = 'For argument "inplace" expected type bool, received type' - with pytest.raises(ValueError, match=msg): - df.copy().rename_axis(mapper={"a": "x", "b": "y"}, axis=1, inplace=value) - - with pytest.raises(ValueError, match=msg): - df.copy().drop("a", axis=1, inplace=value) - - with pytest.raises(ValueError, match=msg): - df.copy().fillna(value=0, inplace=value) - - with pytest.raises(ValueError, match=msg): - df.copy().replace(to_replace=1, value=7, inplace=value) - - with pytest.raises(ValueError, match=msg): - df.copy().interpolate(inplace=value) - - with pytest.raises(ValueError, match=msg): - df.copy()._where(cond=df.a > 2, inplace=value) - - with pytest.raises(ValueError, match=msg): - df.copy().mask(cond=df.a > 2, inplace=value) - - def test_unexpected_keyword(self): - # GH8597 - df = DataFrame( - np.random.default_rng(2).standard_normal((5, 2)), columns=["jim", "joe"] - ) - ca = pd.Categorical([0, 0, 2, 2, 3, np.nan]) - ts = df["joe"].copy() - ts[2] = np.nan - - msg = "unexpected keyword" - with pytest.raises(TypeError, match=msg): - df.drop("joe", axis=1, in_place=True) - - with pytest.raises(TypeError, match=msg): - df.reindex([1, 0], inplace=True) - - with pytest.raises(TypeError, match=msg): - ca.fillna(0, inplace=True) - - with pytest.raises(TypeError, match=msg): - ts.fillna(0, in_place=True) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py deleted file mode 100644 index 9846621b5fb0c74a5c6877cc00ad82d3d37744eb..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/parser/usecols/test_usecols_basic.py +++ /dev/null @@ -1,471 +0,0 @@ -""" -Tests the usecols functionality during parsing -for all of the parsers defined in parsers.py -""" -from io import StringIO - -import numpy as np -import pytest - -from pandas.errors import ParserError - -from pandas import ( - DataFrame, - Index, - array, -) -import pandas._testing as tm - -_msg_validate_usecols_arg = ( - "'usecols' must either be list-like " - "of all strings, all unicode, all " - "integers or a callable." 
-) -_msg_validate_usecols_names = ( - "Usecols do not match columns, columns expected but not found: {0}" -) - -# TODO: Switch to xfails -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") - - -def test_raise_on_mixed_dtype_usecols(all_parsers): - # See gh-12678 - data = """a,b,c - 1000,2000,3000 - 4000,5000,6000 - """ - usecols = [0, "b", 2] - parser = all_parsers - - with pytest.raises(ValueError, match=_msg_validate_usecols_arg): - parser.read_csv(StringIO(data), usecols=usecols) - - -@skip_pyarrow -@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")]) -def test_usecols(all_parsers, usecols): - data = """\ -a,b,c -1,2,3 -4,5,6 -7,8,9 -10,11,12""" - parser = all_parsers - result = parser.read_csv(StringIO(data), usecols=usecols) - - expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"]) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -def test_usecols_with_names(all_parsers): - data = """\ -a,b,c -1,2,3 -4,5,6 -7,8,9 -10,11,12""" - parser = all_parsers - names = ["foo", "bar"] - result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0) - - expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize( - "names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])] -) -def test_usecols_relative_to_names(all_parsers, names, usecols): - data = """\ -1,2,3 -4,5,6 -7,8,9 -10,11,12""" - parser = all_parsers - result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols) - - expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"]) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -def test_usecols_relative_to_names2(all_parsers): - # see gh-5766 - data = """\ -1,2,3 -4,5,6 -7,8,9 -10,11,12""" - parser = all_parsers - result = parser.read_csv( - StringIO(data), names=["a", "b"], header=None, usecols=[0, 1] - ) - - expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"]) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -def test_usecols_name_length_conflict(all_parsers): - data = """\ -1,2,3 -4,5,6 -7,8,9 -10,11,12""" - parser = all_parsers - msg = "Number of passed names did not match number of header fields in the file" - - with pytest.raises(ValueError, match=msg): - parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1]) - - -def test_usecols_single_string(all_parsers): - # see gh-20558 - parser = all_parsers - data = """foo, bar, baz -1000, 2000, 3000 -4000, 5000, 6000""" - - with pytest.raises(ValueError, match=_msg_validate_usecols_arg): - parser.read_csv(StringIO(data), usecols="foo") - - -@skip_pyarrow -@pytest.mark.parametrize( - "data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"] -) -def test_usecols_index_col_false(all_parsers, data): - # see gh-9082 - parser = all_parsers - usecols = ["a", "c", "d"] - expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]}) - - result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize("index_col", ["b", 0]) -@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]]) -def test_usecols_index_col_conflict(all_parsers, usecols, index_col): - # see gh-4201: test that index_col as integer reflects usecols - parser = all_parsers - data = "a,b,c,d\nA,a,1,one\nB,b,2,two" - expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b")) - - result = 
parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col) - tm.assert_frame_equal(result, expected) - - -def test_usecols_index_col_conflict2(all_parsers): - # see gh-4201: test that index_col as integer reflects usecols - parser = all_parsers - data = "a,b,c,d\nA,a,1,one\nB,b,2,two" - - expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")}) - expected = expected.set_index(["b", "c"]) - - result = parser.read_csv( - StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"] - ) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -def test_usecols_implicit_index_col(all_parsers): - # see gh-2654 - parser = all_parsers - data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10" - - result = parser.read_csv(StringIO(data), usecols=["a", "b"]) - expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8]) - tm.assert_frame_equal(result, expected) - - -def test_usecols_index_col_middle(all_parsers): - # GH#9098 - parser = all_parsers - data = """a,b,c,d -1,2,3,4 -""" - result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="c") - expected = DataFrame({"b": [2], "d": [4]}, index=Index([3], name="c")) - tm.assert_frame_equal(result, expected) - - -def test_usecols_index_col_end(all_parsers): - # GH#9098 - parser = all_parsers - data = """a,b,c,d -1,2,3,4 -""" - result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="d") - expected = DataFrame({"b": [2], "c": [3]}, index=Index([4], name="d")) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -def test_usecols_regex_sep(all_parsers): - # see gh-2733 - parser = all_parsers - data = "a b c\n4 apple bat 5.7\n8 orange cow 10" - result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b")) - - expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8]) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -def test_usecols_with_whitespace(all_parsers): - parser = all_parsers - data = "a b c\n4 apple bat 5.7\n8 orange cow 10" - - result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b")) - expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8]) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize( - "usecols,expected", - [ - # Column selection by index. - ([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])), - # Column selection by name. 
- ( - ["0", "1"], - DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"]), - ), - ], -) -def test_usecols_with_integer_like_header(all_parsers, usecols, expected): - parser = all_parsers - data = """2,0,1 -1000,2000,3000 -4000,5000,6000""" - - result = parser.read_csv(StringIO(data), usecols=usecols) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -def test_empty_usecols(all_parsers): - data = "a,b,c\n1,2,3\n4,5,6" - expected = DataFrame(columns=Index([])) - parser = all_parsers - - result = parser.read_csv(StringIO(data), usecols=set()) - tm.assert_frame_equal(result, expected) - - -def test_np_array_usecols(all_parsers): - # see gh-12546 - parser = all_parsers - data = "a,b,c\n1,2,3" - usecols = np.array(["a", "b"]) - - expected = DataFrame([[1, 2]], columns=usecols) - result = parser.read_csv(StringIO(data), usecols=usecols) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize( - "usecols,expected", - [ - ( - lambda x: x.upper() in ["AAA", "BBB", "DDD"], - DataFrame( - { - "AaA": { - 0: 0.056674972999999997, - 1: 2.6132309819999997, - 2: 3.5689350380000002, - }, - "bBb": {0: 8, 1: 2, 2: 7}, - "ddd": {0: "a", 1: "b", 2: "a"}, - } - ), - ), - (lambda x: False, DataFrame(columns=Index([]))), - ], -) -def test_callable_usecols(all_parsers, usecols, expected): - # see gh-14154 - data = """AaA,bBb,CCC,ddd -0.056674973,8,True,a -2.613230982,2,False,b -3.568935038,7,False,a""" - parser = all_parsers - - result = parser.read_csv(StringIO(data), usecols=usecols) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize("usecols", [["a", "c"], lambda x: x in ["a", "c"]]) -def test_incomplete_first_row(all_parsers, usecols): - # see gh-6710 - data = "1,2\n1,2,3" - parser = all_parsers - names = ["a", "b", "c"] - expected = DataFrame({"a": [1, 1], "c": [np.nan, 3]}) - - result = parser.read_csv(StringIO(data), names=names, usecols=usecols) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize( - "data,usecols,kwargs,expected", - [ - # see gh-8985 - ( - "19,29,39\n" * 2 + "10,20,30,40", - [0, 1, 2], - {"header": None}, - DataFrame([[19, 29, 39], [19, 29, 39], [10, 20, 30]]), - ), - # see gh-9549 - ( - ("A,B,C\n1,2,3\n3,4,5\n1,2,4,5,1,6\n1,2,3,,,1,\n1,2,3\n5,6,7"), - ["A", "B", "C"], - {}, - DataFrame( - { - "A": [1, 3, 1, 1, 1, 5], - "B": [2, 4, 2, 2, 2, 6], - "C": [3, 5, 4, 3, 3, 7], - } - ), - ), - ], -) -def test_uneven_length_cols(all_parsers, data, usecols, kwargs, expected): - # see gh-8985 - parser = all_parsers - result = parser.read_csv(StringIO(data), usecols=usecols, **kwargs) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize( - "usecols,kwargs,expected,msg", - [ - ( - ["a", "b", "c", "d"], - {}, - DataFrame({"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}), - None, - ), - ( - ["a", "b", "c", "f"], - {}, - None, - _msg_validate_usecols_names.format(r"\['f'\]"), - ), - (["a", "b", "f"], {}, None, _msg_validate_usecols_names.format(r"\['f'\]")), - ( - ["a", "b", "f", "g"], - {}, - None, - _msg_validate_usecols_names.format(r"\[('f', 'g'|'g', 'f')\]"), - ), - # see gh-14671 - ( - None, - {"header": 0, "names": ["A", "B", "C", "D"]}, - DataFrame({"A": [1, 5], "B": [2, 6], "C": [3, 7], "D": [4, 8]}), - None, - ), - ( - ["A", "B", "C", "f"], - {"header": 0, "names": ["A", "B", "C", "D"]}, - None, - _msg_validate_usecols_names.format(r"\['f'\]"), - ), - ( - ["A", "B", "f"], - {"names": ["A", "B", "C", "D"]}, - None, - 
_msg_validate_usecols_names.format(r"\['f'\]"), - ), - ], -) -def test_raises_on_usecols_names_mismatch(all_parsers, usecols, kwargs, expected, msg): - data = "a,b,c,d\n1,2,3,4\n5,6,7,8" - kwargs.update(usecols=usecols) - parser = all_parsers - - if expected is None: - with pytest.raises(ValueError, match=msg): - parser.read_csv(StringIO(data), **kwargs) - else: - result = parser.read_csv(StringIO(data), **kwargs) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize("usecols", [["A", "C"], [0, 2]]) -def test_usecols_subset_names_mismatch_orig_columns(all_parsers, usecols): - data = "a,b,c,d\n1,2,3,4\n5,6,7,8" - names = ["A", "B", "C", "D"] - parser = all_parsers - - result = parser.read_csv(StringIO(data), header=0, names=names, usecols=usecols) - expected = DataFrame({"A": [1, 5], "C": [3, 7]}) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -@pytest.mark.parametrize("names", [None, ["a", "b"]]) -def test_usecols_indices_out_of_bounds(all_parsers, names): - # GH#25623 & GH 41130; enforced in 2.0 - parser = all_parsers - data = """ -a,b -1,2 - """ - with pytest.raises(ParserError, match="Defining usecols without of bounds"): - parser.read_csv(StringIO(data), usecols=[0, 2], names=names, header=0) - - -@skip_pyarrow -def test_usecols_additional_columns(all_parsers): - # GH#46997 - parser = all_parsers - usecols = lambda header: header.strip() in ["a", "b", "c"] - result = parser.read_csv(StringIO("a,b\nx,y,z"), index_col=False, usecols=usecols) - expected = DataFrame({"a": ["x"], "b": "y"}) - tm.assert_frame_equal(result, expected) - - -@skip_pyarrow -def test_usecols_additional_columns_integer_columns(all_parsers): - # GH#46997 - parser = all_parsers - usecols = lambda header: header.strip() in ["0", "1"] - result = parser.read_csv(StringIO("0,1\nx,y,z"), index_col=False, usecols=usecols) - expected = DataFrame({"0": ["x"], "1": "y"}) - tm.assert_frame_equal(result, expected) - - -def test_usecols_dtype(all_parsers): - parser = all_parsers - data = """ -col1,col2,col3 -a,1,x -b,2,y -""" - result = parser.read_csv( - StringIO(data), - usecols=["col1", "col2"], - dtype={"col1": "string", "col2": "uint8", "col3": "string"}, - ) - expected = DataFrame( - {"col1": array(["a", "b"]), "col2": np.array([1, 2], dtype="uint8")} - ) - tm.assert_frame_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/timedelta/test_constructors.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/timedelta/test_constructors.py deleted file mode 100644 index 7bd9e5fc5e293ac36e5f1c31072eb265f7b75681..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/scalar/timedelta/test_constructors.py +++ /dev/null @@ -1,532 +0,0 @@ -from datetime import timedelta -from itertools import product - -import numpy as np -import pytest - -from pandas._libs.tslibs import OutOfBoundsTimedelta -from pandas._libs.tslibs.dtypes import NpyDatetimeUnit - -from pandas import ( - NaT, - Timedelta, - offsets, - to_timedelta, -) - - -def test_construct_with_weeks_unit_overflow(): - # GH#47268 don't silently wrap around - with pytest.raises(OutOfBoundsTimedelta, match="without overflow"): - Timedelta(1000000000000000000, unit="W") - - with pytest.raises(OutOfBoundsTimedelta, match="without overflow"): - Timedelta(1000000000000000000.0, unit="W") - - -def test_construct_from_td64_with_unit(): - # ignore the unit, as it may cause 
silently overflows leading to incorrect - # results, and in non-overflow cases is irrelevant GH#46827 - obj = np.timedelta64(123456789000000000, "h") - - with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"): - Timedelta(obj, unit="ps") - - with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"): - Timedelta(obj, unit="ns") - - with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"): - Timedelta(obj) - - -def test_from_td64_retain_resolution(): - # case where we retain millisecond resolution - obj = np.timedelta64(12345, "ms") - - td = Timedelta(obj) - assert td._value == obj.view("i8") - assert td._creso == NpyDatetimeUnit.NPY_FR_ms.value - - # Case where we cast to nearest-supported reso - obj2 = np.timedelta64(1234, "D") - td2 = Timedelta(obj2) - assert td2._creso == NpyDatetimeUnit.NPY_FR_s.value - assert td2 == obj2 - assert td2.days == 1234 - - # Case that _would_ overflow if we didn't support non-nano - obj3 = np.timedelta64(1000000000000000000, "us") - td3 = Timedelta(obj3) - assert td3.total_seconds() == 1000000000000 - assert td3._creso == NpyDatetimeUnit.NPY_FR_us.value - - -def test_from_pytimedelta_us_reso(): - # pytimedelta has microsecond resolution, so Timedelta(pytd) inherits that - td = timedelta(days=4, minutes=3) - result = Timedelta(td) - assert result.to_pytimedelta() == td - assert result._creso == NpyDatetimeUnit.NPY_FR_us.value - - -def test_from_tick_reso(): - tick = offsets.Nano() - assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_ns.value - - tick = offsets.Micro() - assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_us.value - - tick = offsets.Milli() - assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_ms.value - - tick = offsets.Second() - assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value - - # everything above Second gets cast to the closest supported reso: second - tick = offsets.Minute() - assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value - - tick = offsets.Hour() - assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value - - tick = offsets.Day() - assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value - - -def test_construction(): - expected = np.timedelta64(10, "D").astype("m8[ns]").view("i8") - assert Timedelta(10, unit="d")._value == expected - assert Timedelta(10.0, unit="d")._value == expected - assert Timedelta("10 days")._value == expected - assert Timedelta(days=10)._value == expected - assert Timedelta(days=10.0)._value == expected - - expected += np.timedelta64(10, "s").astype("m8[ns]").view("i8") - assert Timedelta("10 days 00:00:10")._value == expected - assert Timedelta(days=10, seconds=10)._value == expected - assert Timedelta(days=10, milliseconds=10 * 1000)._value == expected - assert Timedelta(days=10, microseconds=10 * 1000 * 1000)._value == expected - - # rounding cases - assert Timedelta(82739999850000)._value == 82739999850000 - assert "0 days 22:58:59.999850" in str(Timedelta(82739999850000)) - assert Timedelta(123072001000000)._value == 123072001000000 - assert "1 days 10:11:12.001" in str(Timedelta(123072001000000)) - - # string conversion with/without leading zero - # GH#9570 - assert Timedelta("0:00:00") == timedelta(hours=0) - assert Timedelta("00:00:00") == timedelta(hours=0) - assert Timedelta("-1:00:00") == -timedelta(hours=1) - assert Timedelta("-01:00:00") == -timedelta(hours=1) - - # more strings & abbrevs - # GH#8190 - assert Timedelta("1 h") == timedelta(hours=1) - assert Timedelta("1 hour") == 
timedelta(hours=1) - assert Timedelta("1 hr") == timedelta(hours=1) - assert Timedelta("1 hours") == timedelta(hours=1) - assert Timedelta("-1 hours") == -timedelta(hours=1) - assert Timedelta("1 m") == timedelta(minutes=1) - assert Timedelta("1.5 m") == timedelta(seconds=90) - assert Timedelta("1 minute") == timedelta(minutes=1) - assert Timedelta("1 minutes") == timedelta(minutes=1) - assert Timedelta("1 s") == timedelta(seconds=1) - assert Timedelta("1 second") == timedelta(seconds=1) - assert Timedelta("1 seconds") == timedelta(seconds=1) - assert Timedelta("1 ms") == timedelta(milliseconds=1) - assert Timedelta("1 milli") == timedelta(milliseconds=1) - assert Timedelta("1 millisecond") == timedelta(milliseconds=1) - assert Timedelta("1 us") == timedelta(microseconds=1) - assert Timedelta("1 µs") == timedelta(microseconds=1) - assert Timedelta("1 micros") == timedelta(microseconds=1) - assert Timedelta("1 microsecond") == timedelta(microseconds=1) - assert Timedelta("1.5 microsecond") == Timedelta("00:00:00.000001500") - assert Timedelta("1 ns") == Timedelta("00:00:00.000000001") - assert Timedelta("1 nano") == Timedelta("00:00:00.000000001") - assert Timedelta("1 nanosecond") == Timedelta("00:00:00.000000001") - - # combos - assert Timedelta("10 days 1 hour") == timedelta(days=10, hours=1) - assert Timedelta("10 days 1 h") == timedelta(days=10, hours=1) - assert Timedelta("10 days 1 h 1m 1s") == timedelta( - days=10, hours=1, minutes=1, seconds=1 - ) - assert Timedelta("-10 days 1 h 1m 1s") == -timedelta( - days=10, hours=1, minutes=1, seconds=1 - ) - assert Timedelta("-10 days 1 h 1m 1s") == -timedelta( - days=10, hours=1, minutes=1, seconds=1 - ) - assert Timedelta("-10 days 1 h 1m 1s 3us") == -timedelta( - days=10, hours=1, minutes=1, seconds=1, microseconds=3 - ) - assert Timedelta("-10 days 1 h 1.5m 1s 3us") == -timedelta( - days=10, hours=1, minutes=1, seconds=31, microseconds=3 - ) - - # Currently invalid as it has a - on the hh:mm:dd part - # (only allowed on the days) - msg = "only leading negative signs are allowed" - with pytest.raises(ValueError, match=msg): - Timedelta("-10 days -1 h 1.5m 1s 3us") - - # only leading neg signs are allowed - with pytest.raises(ValueError, match=msg): - Timedelta("10 days -1 h 1.5m 1s 3us") - - # no units specified - msg = "no units specified" - with pytest.raises(ValueError, match=msg): - Timedelta("3.1415") - - # invalid construction - msg = "cannot construct a Timedelta" - with pytest.raises(ValueError, match=msg): - Timedelta() - - msg = "unit abbreviation w/o a number" - with pytest.raises(ValueError, match=msg): - Timedelta("foo") - - msg = ( - "cannot construct a Timedelta from " - "the passed arguments, allowed keywords are " - ) - with pytest.raises(ValueError, match=msg): - Timedelta(day=10) - - # floats - expected = np.timedelta64(10, "s").astype("m8[ns]").view("i8") + np.timedelta64( - 500, "ms" - ).astype("m8[ns]").view("i8") - assert Timedelta(10.5, unit="s")._value == expected - - # offset - assert to_timedelta(offsets.Hour(2)) == Timedelta(hours=2) - assert Timedelta(offsets.Hour(2)) == Timedelta(hours=2) - assert Timedelta(offsets.Second(2)) == Timedelta(seconds=2) - - # GH#11995: unicode - expected = Timedelta("1H") - result = Timedelta("1H") - assert result == expected - assert to_timedelta(offsets.Hour(2)) == Timedelta("0 days, 02:00:00") - - msg = "unit abbreviation w/o a number" - with pytest.raises(ValueError, match=msg): - Timedelta("foo bar") - - -@pytest.mark.parametrize( - "item", - list( - { - "days": "D", - 
"seconds": "s", - "microseconds": "us", - "milliseconds": "ms", - "minutes": "m", - "hours": "h", - "weeks": "W", - }.items() - ), -) -@pytest.mark.parametrize( - "npdtype", [np.int64, np.int32, np.int16, np.float64, np.float32, np.float16] -) -def test_td_construction_with_np_dtypes(npdtype, item): - # GH#8757: test construction with np dtypes - pykwarg, npkwarg = item - expected = np.timedelta64(1, npkwarg).astype("m8[ns]").view("i8") - assert Timedelta(**{pykwarg: npdtype(1)})._value == expected - - -@pytest.mark.parametrize( - "val", - [ - "1s", - "-1s", - "1us", - "-1us", - "1 day", - "-1 day", - "-23:59:59.999999", - "-1 days +23:59:59.999999", - "-1ns", - "1ns", - "-23:59:59.999999999", - ], -) -def test_td_from_repr_roundtrip(val): - # round-trip both for string and value - td = Timedelta(val) - assert Timedelta(td._value) == td - - assert Timedelta(str(td)) == td - assert Timedelta(td._repr_base(format="all")) == td - assert Timedelta(td._repr_base()) == td - - -def test_overflow_on_construction(): - # GH#3374 - value = Timedelta("1day")._value * 20169940 - msg = "Cannot cast 1742682816000000000000 from ns to 'ns' without overflow" - with pytest.raises(OutOfBoundsTimedelta, match=msg): - Timedelta(value) - - # xref GH#17637 - msg = "Cannot cast 139993 from D to 'ns' without overflow" - with pytest.raises(OutOfBoundsTimedelta, match=msg): - Timedelta(7 * 19999, unit="D") - - # used to overflow before non-ns support - td = Timedelta(timedelta(days=13 * 19999)) - assert td._creso == NpyDatetimeUnit.NPY_FR_us.value - assert td.days == 13 * 19999 - - -@pytest.mark.parametrize( - "val, unit", - [ - (15251, "W"), # 1 - (106752, "D"), # change from previous: - (2562048, "h"), # 0 hours - (153722868, "m"), # 13 minutes - (9223372037, "s"), # 44 seconds - ], -) -def test_construction_out_of_bounds_td64ns(val, unit): - # TODO: parametrize over units just above/below the implementation bounds - # once GH#38964 is resolved - - # Timedelta.max is just under 106752 days - td64 = np.timedelta64(val, unit) - assert td64.astype("m8[ns]").view("i8") < 0 # i.e. naive astype will be wrong - - td = Timedelta(td64) - if unit != "M": - # with unit="M" the conversion to "s" is poorly defined - # (and numpy issues DeprecationWarning) - assert td.asm8 == td64 - assert td.asm8.dtype == "m8[s]" - msg = r"Cannot cast 1067\d\d days .* to unit='ns' without overflow" - with pytest.raises(OutOfBoundsTimedelta, match=msg): - td.as_unit("ns") - - # But just back in bounds and we are OK - assert Timedelta(td64 - 1) == td64 - 1 - - td64 *= -1 - assert td64.astype("m8[ns]").view("i8") > 0 # i.e. 
naive astype will be wrong - - td2 = Timedelta(td64) - msg = r"Cannot cast -1067\d\d days .* to unit='ns' without overflow" - with pytest.raises(OutOfBoundsTimedelta, match=msg): - td2.as_unit("ns") - - # But just back in bounds and we are OK - assert Timedelta(td64 + 1) == td64 + 1 - - -@pytest.mark.parametrize( - "val, unit", - [ - (15251 * 10**9, "W"), - (106752 * 10**9, "D"), - (2562048 * 10**9, "h"), - (153722868 * 10**9, "m"), - ], -) -def test_construction_out_of_bounds_td64s(val, unit): - td64 = np.timedelta64(val, unit) - with pytest.raises(OutOfBoundsTimedelta, match=str(td64)): - Timedelta(td64) - - # But just back in bounds and we are OK - assert Timedelta(td64 - 10**9) == td64 - 10**9 - - -@pytest.mark.parametrize( - "fmt,exp", - [ - ( - "P6DT0H50M3.010010012S", - Timedelta( - days=6, - minutes=50, - seconds=3, - milliseconds=10, - microseconds=10, - nanoseconds=12, - ), - ), - ( - "P-6DT0H50M3.010010012S", - Timedelta( - days=-6, - minutes=50, - seconds=3, - milliseconds=10, - microseconds=10, - nanoseconds=12, - ), - ), - ("P4DT12H30M5S", Timedelta(days=4, hours=12, minutes=30, seconds=5)), - ("P0DT0H0M0.000000123S", Timedelta(nanoseconds=123)), - ("P0DT0H0M0.00001S", Timedelta(microseconds=10)), - ("P0DT0H0M0.001S", Timedelta(milliseconds=1)), - ("P0DT0H1M0S", Timedelta(minutes=1)), - ("P1DT25H61M61S", Timedelta(days=1, hours=25, minutes=61, seconds=61)), - ("PT1S", Timedelta(seconds=1)), - ("PT0S", Timedelta(seconds=0)), - ("P1WT0S", Timedelta(days=7, seconds=0)), - ("P1D", Timedelta(days=1)), - ("P1DT1H", Timedelta(days=1, hours=1)), - ("P1W", Timedelta(days=7)), - ("PT300S", Timedelta(seconds=300)), - ("P1DT0H0M00000000000S", Timedelta(days=1)), - ("PT-6H3M", Timedelta(hours=-6, minutes=3)), - ("-PT6H3M", Timedelta(hours=-6, minutes=-3)), - ("-PT-6H+3M", Timedelta(hours=6, minutes=-3)), - ], -) -def test_iso_constructor(fmt, exp): - assert Timedelta(fmt) == exp - - -@pytest.mark.parametrize( - "fmt", - [ - "PPPPPPPPPPPP", - "PDTHMS", - "P0DT999H999M999S", - "P1DT0H0M0.0000000000000S", - "P1DT0H0M0.S", - "P", - "-P", - ], -) -def test_iso_constructor_raises(fmt): - msg = f"Invalid ISO 8601 Duration format - {fmt}" - with pytest.raises(ValueError, match=msg): - Timedelta(fmt) - - -@pytest.mark.parametrize( - "constructed_td, conversion", - [ - (Timedelta(nanoseconds=100), "100ns"), - ( - Timedelta( - days=1, - hours=1, - minutes=1, - weeks=1, - seconds=1, - milliseconds=1, - microseconds=1, - nanoseconds=1, - ), - 694861001001001, - ), - (Timedelta(microseconds=1) + Timedelta(nanoseconds=1), "1us1ns"), - (Timedelta(microseconds=1) - Timedelta(nanoseconds=1), "999ns"), - (Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), "990ns"), - ], -) -def test_td_constructor_on_nanoseconds(constructed_td, conversion): - # GH#9273 - assert constructed_td == Timedelta(conversion) - - -def test_td_constructor_value_error(): - msg = "Invalid type . Must be int or float." 
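# Editor's note: the assertion below exercises the numeric-only rule for
# Timedelta's component keywords; a non-numeric value such as
# Timedelta(nanoseconds="abc") raises TypeError rather than being coerced.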
- with pytest.raises(TypeError, match=msg): - Timedelta(nanoseconds="abc") - - -def test_timedelta_constructor_identity(): - # Test for #30543 - expected = Timedelta(np.timedelta64(1, "s")) - result = Timedelta(expected) - assert result is expected - - -def test_timedelta_pass_td_and_kwargs_raises(): - # don't silently ignore the kwargs GH#48898 - td = Timedelta(days=1) - msg = ( - "Cannot pass both a Timedelta input and timedelta keyword arguments, " - r"got \['days'\]" - ) - with pytest.raises(ValueError, match=msg): - Timedelta(td, days=2) - - -@pytest.mark.parametrize( - "constructor, value, unit, expectation", - [ - (Timedelta, "10s", "ms", (ValueError, "unit must not be specified")), - (to_timedelta, "10s", "ms", (ValueError, "unit must not be specified")), - (to_timedelta, ["1", 2, 3], "s", (ValueError, "unit must not be specified")), - ], -) -def test_string_with_unit(constructor, value, unit, expectation): - exp, match = expectation - with pytest.raises(exp, match=match): - _ = constructor(value, unit=unit) - - -@pytest.mark.parametrize( - "value", - [ - "".join(elements) - for repetition in (1, 2) - for elements in product("+-, ", repeat=repetition) - ], -) -def test_string_without_numbers(value): - # GH39710 Timedelta input string with only symbols and no digits raises an error - msg = ( - "symbols w/o a number" - if value != "--" - else "only leading negative signs are allowed" - ) - with pytest.raises(ValueError, match=msg): - Timedelta(value) - - -def test_timedelta_new_npnat(): - # GH#48898 - nat = np.timedelta64("NaT", "h") - assert Timedelta(nat) is NaT - - -def test_subclass_respected(): - # GH#49579 - class MyCustomTimedelta(Timedelta): - pass - - td = MyCustomTimedelta("1 minute") - assert isinstance(td, MyCustomTimedelta) - - -def test_non_nano_value(): - # https://github.com/pandas-dev/pandas/issues/49076 - result = Timedelta(10, unit="D").as_unit("s").value - # `.value` shows nanoseconds, even though unit is 's' - assert result == 864000000000000 - - # out-of-nanoseconds-bounds `.value` raises informative message - msg = ( - r"Cannot convert Timedelta to nanoseconds without overflow. " - r"Use `.asm8.view\('i8'\)` to cast represent Timedelta in its " - r"own unit \(here, s\).$" - ) - td = Timedelta(1_000, "D").as_unit("s") * 1_000 - with pytest.raises(OverflowError, match=msg): - td.value - # check that the suggested workaround actually works - result = td.asm8.view("i8") - assert result == 86400000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/window/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/window/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/install/legacy.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/install/legacy.py deleted file mode 100644 index 5b7ef9017181e94a86b8985f7523feaea387f612..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_internal/operations/install/legacy.py +++ /dev/null @@ -1,120 +0,0 @@ -"""Legacy installation process, i.e. `setup.py install`. 
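The entry point is install(), which runs ``setup.py install`` with a
``--record`` file and, on success, rewrites that record as
``installed-files.txt`` inside the project's .egg-info directory via
write_installed_files_from_setuptools_record().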
-""" - -import logging -import os -from distutils.util import change_root -from typing import List, Optional, Sequence - -from pip._internal.build_env import BuildEnvironment -from pip._internal.exceptions import InstallationError, LegacyInstallFailure -from pip._internal.models.scheme import Scheme -from pip._internal.utils.misc import ensure_dir -from pip._internal.utils.setuptools_build import make_setuptools_install_args -from pip._internal.utils.subprocess import runner_with_spinner_message -from pip._internal.utils.temp_dir import TempDirectory - -logger = logging.getLogger(__name__) - - -def write_installed_files_from_setuptools_record( - record_lines: List[str], - root: Optional[str], - req_description: str, -) -> None: - def prepend_root(path: str) -> str: - if root is None or not os.path.isabs(path): - return path - else: - return change_root(root, path) - - for line in record_lines: - directory = os.path.dirname(line) - if directory.endswith(".egg-info"): - egg_info_dir = prepend_root(directory) - break - else: - message = ( - "{} did not indicate that it installed an " - ".egg-info directory. Only setup.py projects " - "generating .egg-info directories are supported." - ).format(req_description) - raise InstallationError(message) - - new_lines = [] - for line in record_lines: - filename = line.strip() - if os.path.isdir(filename): - filename += os.path.sep - new_lines.append(os.path.relpath(prepend_root(filename), egg_info_dir)) - new_lines.sort() - ensure_dir(egg_info_dir) - inst_files_path = os.path.join(egg_info_dir, "installed-files.txt") - with open(inst_files_path, "w") as f: - f.write("\n".join(new_lines) + "\n") - - -def install( - install_options: List[str], - global_options: Sequence[str], - root: Optional[str], - home: Optional[str], - prefix: Optional[str], - use_user_site: bool, - pycompile: bool, - scheme: Scheme, - setup_py_path: str, - isolated: bool, - req_name: str, - build_env: BuildEnvironment, - unpacked_source_directory: str, - req_description: str, -) -> bool: - - header_dir = scheme.headers - - with TempDirectory(kind="record") as temp_dir: - try: - record_filename = os.path.join(temp_dir.path, "install-record.txt") - install_args = make_setuptools_install_args( - setup_py_path, - global_options=global_options, - install_options=install_options, - record_filename=record_filename, - root=root, - prefix=prefix, - header_dir=header_dir, - home=home, - use_user_site=use_user_site, - no_user_config=isolated, - pycompile=pycompile, - ) - - runner = runner_with_spinner_message( - f"Running setup.py install for {req_name}" - ) - with build_env: - runner( - cmd=install_args, - cwd=unpacked_source_directory, - ) - - if not os.path.exists(record_filename): - logger.debug("Record file %s not found", record_filename) - # Signal to the caller that we didn't install the new package - return False - - except Exception as e: - # Signal to the caller that we didn't install the new package - raise LegacyInstallFailure(package_details=req_name) from e - - # At this point, we have successfully installed the requirement. - - # We intentionally do not use any encoding to read the file because - # setuptools writes the file using distutils.file_util.write_file, - # which does not specify an encoding. 
- with open(record_filename) as f: - record_lines = f.read().splitlines() - - write_installed_files_from_setuptools_record(record_lines, root, req_description) - return True diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tenacity/tornadoweb.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tenacity/tornadoweb.py deleted file mode 100644 index 8f7731af0e62a985dbe4c77771a80525848e793c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/tenacity/tornadoweb.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2017 Elisey Zanko -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import typing - -from pip._vendor.tenacity import BaseRetrying -from pip._vendor.tenacity import DoAttempt -from pip._vendor.tenacity import DoSleep -from pip._vendor.tenacity import RetryCallState - -from tornado import gen - -if typing.TYPE_CHECKING: - from tornado.concurrent import Future - -_RetValT = typing.TypeVar("_RetValT") - - -class TornadoRetrying(BaseRetrying): - def __init__(self, sleep: "typing.Callable[[float], Future[None]]" = gen.sleep, **kwargs: typing.Any) -> None: - super().__init__(**kwargs) - self.sleep = sleep - - @gen.coroutine - def __call__( # type: ignore # Change signature from supertype - self, - fn: "typing.Callable[..., typing.Union[typing.Generator[typing.Any, typing.Any, _RetValT], Future[_RetValT]]]", - *args: typing.Any, - **kwargs: typing.Any, - ) -> "typing.Generator[typing.Any, typing.Any, _RetValT]": - self.begin() - - retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs) - while True: - do = self.iter(retry_state=retry_state) - if isinstance(do, DoAttempt): - try: - result = yield fn(*args, **kwargs) - except BaseException: # noqa: B902 - retry_state.set_exception(sys.exc_info()) - else: - retry_state.set_result(result) - elif isinstance(do, DoSleep): - retry_state.prepare_for_next_attempt() - yield self.sleep(do) - else: - raise gen.Return(do) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_vendor/more_itertools/more.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_vendor/more_itertools/more.py deleted file mode 100644 index 0f7d282aa5df08f3e2692bf1e51dfaaea60ae4ea..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_vendor/more_itertools/more.py +++ /dev/null @@ -1,3825 +0,0 @@ -import warnings - -from collections import Counter, defaultdict, deque, abc -from collections.abc import Sequence -from concurrent.futures import ThreadPoolExecutor -from functools import partial, reduce, wraps -from heapq import merge, heapify, heapreplace, heappop -from itertools import ( - chain, - compress, - count, - cycle, - dropwhile, - groupby, - islice, - repeat, - starmap, - takewhile, - tee, - zip_longest, -) -from math import exp, factorial, floor, log -from queue import Empty, Queue 
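# Editor's sketch: the TornadoRetrying subclass defined in tornadoweb.py above
# drives the same begin()/iter() state machine as the synchronous Retrying,
# but sleeps and awaits results inside a Tornado coroutine. A minimal usage
# sketch, assuming tornado is installed; `flaky` and `attempts` are
# hypothetical names introduced here for illustration:
#
#     from pip._vendor.tenacity import stop_after_attempt
#     from pip._vendor.tenacity.tornadoweb import TornadoRetrying
#     from tornado import gen, ioloop
#
#     attempts = {"count": 0}
#
#     @gen.coroutine
#     def flaky():
#         attempts["count"] += 1
#         if attempts["count"] < 3:
#             raise IOError("transient failure")
#         raise gen.Return("ok")
#
#     retrying = TornadoRetrying(stop=stop_after_attempt(5))
#     result = ioloop.IOLoop.current().run_sync(lambda: retrying(flaky))
#     assert result == "ok"  # succeeds on the third attempt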
-from random import random, randrange, uniform -from operator import itemgetter, mul, sub, gt, lt -from sys import hexversion, maxsize -from time import monotonic - -from .recipes import ( - consume, - flatten, - pairwise, - powerset, - take, - unique_everseen, -) - -__all__ = [ - 'AbortThread', - 'adjacent', - 'always_iterable', - 'always_reversible', - 'bucket', - 'callback_iter', - 'chunked', - 'circular_shifts', - 'collapse', - 'collate', - 'consecutive_groups', - 'consumer', - 'countable', - 'count_cycle', - 'mark_ends', - 'difference', - 'distinct_combinations', - 'distinct_permutations', - 'distribute', - 'divide', - 'exactly_n', - 'filter_except', - 'first', - 'groupby_transform', - 'ilen', - 'interleave_longest', - 'interleave', - 'intersperse', - 'islice_extended', - 'iterate', - 'ichunked', - 'is_sorted', - 'last', - 'locate', - 'lstrip', - 'make_decorator', - 'map_except', - 'map_reduce', - 'nth_or_last', - 'nth_permutation', - 'nth_product', - 'numeric_range', - 'one', - 'only', - 'padded', - 'partitions', - 'set_partitions', - 'peekable', - 'repeat_last', - 'replace', - 'rlocate', - 'rstrip', - 'run_length', - 'sample', - 'seekable', - 'SequenceView', - 'side_effect', - 'sliced', - 'sort_together', - 'split_at', - 'split_after', - 'split_before', - 'split_when', - 'split_into', - 'spy', - 'stagger', - 'strip', - 'substrings', - 'substrings_indexes', - 'time_limited', - 'unique_to_each', - 'unzip', - 'windowed', - 'with_iter', - 'UnequalIterablesError', - 'zip_equal', - 'zip_offset', - 'windowed_complete', - 'all_unique', - 'value_chain', - 'product_index', - 'combination_index', - 'permutation_index', -] - -_marker = object() - - -def chunked(iterable, n, strict=False): - """Break *iterable* into lists of length *n*: - - >>> list(chunked([1, 2, 3, 4, 5, 6], 3)) - [[1, 2, 3], [4, 5, 6]] - - By the default, the last yielded list will have fewer than *n* elements - if the length of *iterable* is not divisible by *n*: - - >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) - [[1, 2, 3], [4, 5, 6], [7, 8]] - - To use a fill-in value instead, see the :func:`grouper` recipe. - - If the length of *iterable* is not divisible by *n* and *strict* is - ``True``, then ``ValueError`` will be raised before the last - list is yielded. - - """ - iterator = iter(partial(take, n, iter(iterable)), []) - if strict: - - def ret(): - for chunk in iterator: - if len(chunk) != n: - raise ValueError('iterable is not divisible by n.') - yield chunk - - return iter(ret()) - else: - return iterator - - -def first(iterable, default=_marker): - """Return the first item of *iterable*, or *default* if *iterable* is - empty. - - >>> first([0, 1, 2, 3]) - 0 - >>> first([], 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - - :func:`first` is useful when you have a generator of expensive-to-retrieve - values and want any arbitrary one. It is marginally shorter than - ``next(iter(iterable), default)``. - - """ - try: - return next(iter(iterable)) - except StopIteration as e: - if default is _marker: - raise ValueError( - 'first() was called on an empty iterable, and no ' - 'default value was provided.' - ) from e - return default - - -def last(iterable, default=_marker): - """Return the last item of *iterable*, or *default* if *iterable* is - empty. - - >>> last([0, 1, 2, 3]) - 3 - >>> last([], 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. 
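    For a sequence this is a constant-time ``seq[-1]`` lookup; other
    iterables are consumed to reach the final item. For example:

    >>> last(iter([0, 1, 2]))
    2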
- """ - try: - if isinstance(iterable, Sequence): - return iterable[-1] - # Work around https://bugs.python.org/issue38525 - elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0): - return next(reversed(iterable)) - else: - return deque(iterable, maxlen=1)[-1] - except (IndexError, TypeError, StopIteration): - if default is _marker: - raise ValueError( - 'last() was called on an empty iterable, and no default was ' - 'provided.' - ) - return default - - -def nth_or_last(iterable, n, default=_marker): - """Return the nth or the last item of *iterable*, - or *default* if *iterable* is empty. - - >>> nth_or_last([0, 1, 2, 3], 2) - 2 - >>> nth_or_last([0, 1], 2) - 1 - >>> nth_or_last([], 0, 'some default') - 'some default' - - If *default* is not provided and there are no items in the iterable, - raise ``ValueError``. - """ - return last(islice(iterable, n + 1), default=default) - - -class peekable: - """Wrap an iterator to allow lookahead and prepending elements. - - Call :meth:`peek` on the result to get the value that will be returned - by :func:`next`. This won't advance the iterator: - - >>> p = peekable(['a', 'b']) - >>> p.peek() - 'a' - >>> next(p) - 'a' - - Pass :meth:`peek` a default value to return that instead of raising - ``StopIteration`` when the iterator is exhausted. - - >>> p = peekable([]) - >>> p.peek('hi') - 'hi' - - peekables also offer a :meth:`prepend` method, which "inserts" items - at the head of the iterable: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> p.peek() - 11 - >>> list(p) - [11, 12, 1, 2, 3] - - peekables can be indexed. Index 0 is the item that will be returned by - :func:`next`, index 1 is the item after that, and so on: - The values up to the given index will be cached. - - >>> p = peekable(['a', 'b', 'c', 'd']) - >>> p[0] - 'a' - >>> p[1] - 'b' - >>> next(p) - 'a' - - Negative indexes are supported, but be aware that they will cache the - remaining items in the source iterator, which may require significant - storage. - - To check whether a peekable is exhausted, check its truth value: - - >>> p = peekable(['a', 'b']) - >>> if p: # peekable has items - ... list(p) - ['a', 'b'] - >>> if not p: # peekable is exhausted - ... list(p) - [] - - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self._cache = deque() - - def __iter__(self): - return self - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - """Return the item that will be next returned from ``next()``. - - Return ``default`` if there are no items left. If ``default`` is not - provided, raise ``StopIteration``. - - """ - if not self._cache: - try: - self._cache.append(next(self._it)) - except StopIteration: - if default is _marker: - raise - return default - return self._cache[0] - - def prepend(self, *items): - """Stack up items to be the next ones returned from ``next()`` or - ``self.peek()``. The items will be returned in - first in, first out order:: - - >>> p = peekable([1, 2, 3]) - >>> p.prepend(10, 11, 12) - >>> next(p) - 10 - >>> list(p) - [11, 12, 1, 2, 3] - - It is possible, by prepending items, to "resurrect" a peekable that - previously raised ``StopIteration``. - - >>> p = peekable([]) - >>> next(p) - Traceback (most recent call last): - ... - StopIteration - >>> p.prepend(1) - >>> next(p) - 1 - >>> next(p) - Traceback (most recent call last): - ... 
- StopIteration - - """ - self._cache.extendleft(reversed(items)) - - def __next__(self): - if self._cache: - return self._cache.popleft() - - return next(self._it) - - def _get_slice(self, index): - # Normalize the slice's arguments - step = 1 if (index.step is None) else index.step - if step > 0: - start = 0 if (index.start is None) else index.start - stop = maxsize if (index.stop is None) else index.stop - elif step < 0: - start = -1 if (index.start is None) else index.start - stop = (-maxsize - 1) if (index.stop is None) else index.stop - else: - raise ValueError('slice step cannot be zero') - - # If either the start or stop index is negative, we'll need to cache - # the rest of the iterable in order to slice from the right side. - if (start < 0) or (stop < 0): - self._cache.extend(self._it) - # Otherwise we'll need to find the rightmost index and cache to that - # point. - else: - n = min(max(start, stop) + 1, maxsize) - cache_len = len(self._cache) - if n >= cache_len: - self._cache.extend(islice(self._it, n - cache_len)) - - return list(self._cache)[index] - - def __getitem__(self, index): - if isinstance(index, slice): - return self._get_slice(index) - - cache_len = len(self._cache) - if index < 0: - self._cache.extend(self._it) - elif index >= cache_len: - self._cache.extend(islice(self._it, index + 1 - cache_len)) - - return self._cache[index] - - -def collate(*iterables, **kwargs): - """Return a sorted merge of the items from each of several already-sorted - *iterables*. - - >>> list(collate('ACDZ', 'AZ', 'JKL')) - ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] - - Works lazily, keeping only the next value from each iterable in memory. Use - :func:`collate` to, for example, perform a n-way mergesort of items that - don't fit in memory. - - If a *key* function is specified, the iterables will be sorted according - to its result: - - >>> key = lambda s: int(s) # Sort by numeric value, not by string - >>> list(collate(['1', '10'], ['2', '11'], key=key)) - ['1', '2', '10', '11'] - - - If the *iterables* are sorted in descending order, set *reverse* to - ``True``: - - >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True)) - [5, 4, 3, 2, 1, 0] - - If the elements of the passed-in iterables are out of order, you might get - unexpected results. - - On Python 3.5+, this function is an alias for :func:`heapq.merge`. - - """ - warnings.warn( - "collate is no longer part of more_itertools, use heapq.merge", - DeprecationWarning, - ) - return merge(*iterables, **kwargs) - - -def consumer(func): - """Decorator that automatically advances a PEP-342-style "reverse iterator" - to its first yield point so you don't have to call ``next()`` on it - manually. - - >>> @consumer - ... def tally(): - ... i = 0 - ... while True: - ... print('Thing number %s is %s.' % (i, (yield))) - ... i += 1 - ... - >>> t = tally() - >>> t.send('red') - Thing number 0 is red. - >>> t.send('fish') - Thing number 1 is fish. - - Without the decorator, you would have to call ``next(t)`` before - ``t.send()`` could be used. - - """ - - @wraps(func) - def wrapper(*args, **kwargs): - gen = func(*args, **kwargs) - next(gen) - return gen - - return wrapper - - -def ilen(iterable): - """Return the number of items in *iterable*. - - >>> ilen(x for x in range(1000000) if x % 3 == 0) - 333334 - - This consumes the iterable, so handle with care. - - """ - # This approach was selected because benchmarks showed it's likely the - # fastest of the known implementations at the time of writing. - # See GitHub tracker: #236, #230. 
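# Editor's note on the mechanics: zip(iterable, counter) advances the count()
# object once per item, deque(..., maxlen=0) exhausts that zip while storing
# nothing, and next(counter) then yields exactly the number of items consumed.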
- counter = count() - deque(zip(iterable, counter), maxlen=0) - return next(counter) - - -def iterate(func, start): - """Return ``start``, ``func(start)``, ``func(func(start))``, ... - - >>> from itertools import islice - >>> list(islice(iterate(lambda x: 2*x, 1), 10)) - [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] - - """ - while True: - yield start - start = func(start) - - -def with_iter(context_manager): - """Wrap an iterable in a ``with`` statement, so it closes once exhausted. - - For example, this will close the file when the iterator is exhausted:: - - upper_lines = (line.upper() for line in with_iter(open('foo'))) - - Any context manager which returns an iterable is a candidate for - ``with_iter``. - - """ - with context_manager as iterable: - yield from iterable - - -def one(iterable, too_short=None, too_long=None): - """Return the first item from *iterable*, which is expected to contain only - that item. Raise an exception if *iterable* is empty or has more than one - item. - - :func:`one` is useful for ensuring that an iterable contains only one item. - For example, it can be used to retrieve the result of a database query - that is expected to return a single row. - - If *iterable* is empty, ``ValueError`` will be raised. You may specify a - different exception with the *too_short* keyword: - - >>> it = [] - >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: too many items in iterable (expected 1)' - >>> too_short = IndexError('too few items') - >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - IndexError: too few items - - Similarly, if *iterable* contains more than one item, ``ValueError`` will - be raised. You may specify a different exception with the *too_long* - keyword: - - >>> it = ['too', 'many'] - >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 'too', - 'many', and perhaps more. - >>> too_long = RuntimeError - >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - RuntimeError - - Note that :func:`one` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check iterable - contents less destructively. - - """ - it = iter(iterable) - - try: - first_value = next(it) - except StopIteration as e: - raise ( - too_short or ValueError('too few items in iterable (expected 1)') - ) from e - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def distinct_permutations(iterable, r=None): - """Yield successive distinct permutations of the elements in *iterable*. - - >>> sorted(distinct_permutations([1, 0, 1])) - [(0, 1, 1), (1, 0, 1), (1, 1, 0)] - - Equivalent to ``set(permutations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - Duplicate permutations arise when there are duplicated elements in the - input iterable. The number of items returned is - `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of - items input, and each `x_i` is the count of a distinct item in the input - sequence. 
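    For example, ``[1, 0, 1]`` has `n = 3` with the value ``1`` appearing
    twice, so `3! / (2! * 1!) = 3` distinct permutations exist, matching the
    first doctest above.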
- - If *r* is given, only the *r*-length permutations are yielded. - - >>> sorted(distinct_permutations([1, 0, 1], r=2)) - [(0, 1), (1, 0), (1, 1)] - >>> sorted(distinct_permutations(range(3), r=2)) - [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] - - """ - # Algorithm: https://w.wiki/Qai - def _full(A): - while True: - # Yield the permutation we have - yield tuple(A) - - # Find the largest index i such that A[i] < A[i + 1] - for i in range(size - 2, -1, -1): - if A[i] < A[i + 1]: - break - # If no such index exists, this permutation is the last one - else: - return - - # Find the largest index j greater than j such that A[i] < A[j] - for j in range(size - 1, i, -1): - if A[i] < A[j]: - break - - # Swap the value of A[i] with that of A[j], then reverse the - # sequence from A[i + 1] to form the new permutation - A[i], A[j] = A[j], A[i] - A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1] - - # Algorithm: modified from the above - def _partial(A, r): - # Split A into the first r items and the last r items - head, tail = A[:r], A[r:] - right_head_indexes = range(r - 1, -1, -1) - left_tail_indexes = range(len(tail)) - - while True: - # Yield the permutation we have - yield tuple(head) - - # Starting from the right, find the first index of the head with - # value smaller than the maximum value of the tail - call it i. - pivot = tail[-1] - for i in right_head_indexes: - if head[i] < pivot: - break - pivot = head[i] - else: - return - - # Starting from the left, find the first value of the tail - # with a value greater than head[i] and swap. - for j in left_tail_indexes: - if tail[j] > head[i]: - head[i], tail[j] = tail[j], head[i] - break - # If we didn't find one, start from the right and find the first - # index of the head with a value greater than head[i] and swap. - else: - for j in right_head_indexes: - if head[j] > head[i]: - head[i], head[j] = head[j], head[i] - break - - # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)] - tail += head[: i - r : -1] # head[i + 1:][::-1] - i += 1 - head[i:], tail[:] = tail[: r - i], tail[r - i :] - - items = sorted(iterable) - - size = len(items) - if r is None: - r = size - - if 0 < r <= size: - return _full(items) if (r == size) else _partial(items, r) - - return iter(() if r else ((),)) - - -def intersperse(e, iterable, n=1): - """Intersperse filler element *e* among the items in *iterable*, leaving - *n* items between each filler element. - - >>> list(intersperse('!', [1, 2, 3, 4, 5])) - [1, '!', 2, '!', 3, '!', 4, '!', 5] - - >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2)) - [1, 2, None, 3, 4, None, 5] - - """ - if n == 0: - raise ValueError('n must be > 0') - elif n == 1: - # interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2... - # islice(..., 1, None) -> x_0, e, e, x_1, e, x_2... - return islice(interleave(repeat(e), iterable), 1, None) - else: - # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]... - # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]... - # flatten(...) -> x_0, x_1, e, x_2, x_3... - filler = repeat([e]) - chunks = chunked(iterable, n) - return flatten(islice(interleave(filler, chunks), 1, None)) - - -def unique_to_each(*iterables): - """Return the elements from each of the input iterables that aren't in the - other input iterables. - - For example, suppose you have a set of packages, each with a set of - dependencies:: - - {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}} - - If you remove one package, which dependencies can also be removed? 
- - If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not - associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for - ``pkg_2``, and ``D`` is only needed for ``pkg_3``:: - - >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'}) - [['A'], ['C'], ['D']] - - If there are duplicates in one input iterable that aren't in the others - they will be duplicated in the output. Input order is preserved:: - - >>> unique_to_each("mississippi", "missouri") - [['p', 'p'], ['o', 'u', 'r']] - - It is assumed that the elements of each iterable are hashable. - - """ - pool = [list(it) for it in iterables] - counts = Counter(chain.from_iterable(map(set, pool))) - uniques = {element for element in counts if counts[element] == 1} - return [list(filter(uniques.__contains__, it)) for it in pool] - - -def windowed(seq, n, fillvalue=None, step=1): - """Return a sliding window of width *n* over the given iterable. - - >>> all_windows = windowed([1, 2, 3, 4, 5], 3) - >>> list(all_windows) - [(1, 2, 3), (2, 3, 4), (3, 4, 5)] - - When the window is larger than the iterable, *fillvalue* is used in place - of missing values: - - >>> list(windowed([1, 2, 3], 4)) - [(1, 2, 3, None)] - - Each window will advance in increments of *step*: - - >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2)) - [(1, 2, 3), (3, 4, 5), (5, 6, '!')] - - To slide into the iterable's items, use :func:`chain` to add filler items - to the left: - - >>> iterable = [1, 2, 3, 4] - >>> n = 3 - >>> padding = [None] * (n - 1) - >>> list(windowed(chain(padding, iterable), 3)) - [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)] - """ - if n < 0: - raise ValueError('n must be >= 0') - if n == 0: - yield tuple() - return - if step < 1: - raise ValueError('step must be >= 1') - - window = deque(maxlen=n) - i = n - for _ in map(window.append, seq): - i -= 1 - if not i: - i = step - yield tuple(window) - - size = len(window) - if size < n: - yield tuple(chain(window, repeat(fillvalue, n - size))) - elif 0 < i < min(step, n): - window += (fillvalue,) * i - yield tuple(window) - - -def substrings(iterable): - """Yield all of the substrings of *iterable*. - - >>> [''.join(s) for s in substrings('more')] - ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more'] - - Note that non-string iterables can also be subdivided. - - >>> list(substrings([0, 1, 2])) - [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)] - - """ - # The length-1 substrings - seq = [] - for item in iter(iterable): - seq.append(item) - yield (item,) - seq = tuple(seq) - item_count = len(seq) - - # And the rest - for n in range(2, item_count + 1): - for i in range(item_count - n + 1): - yield seq[i : i + n] - - -def substrings_indexes(seq, reverse=False): - """Yield all substrings and their positions in *seq* - - The items yielded will be a tuple of the form ``(substr, i, j)``, where - ``substr == seq[i:j]``. - - This function only works for iterables that support slicing, such as - ``str`` objects. - - >>> for item in substrings_indexes('more'): - ... print(item) - ('m', 0, 1) - ('o', 1, 2) - ('r', 2, 3) - ('e', 3, 4) - ('mo', 0, 2) - ('or', 1, 3) - ('re', 2, 4) - ('mor', 0, 3) - ('ore', 1, 4) - ('more', 0, 4) - - Set *reverse* to ``True`` to yield the same items in the opposite order. 
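    For example, with a two-character input:

    >>> for item in substrings_indexes('ab', reverse=True):
    ...     print(item)
    ('ab', 0, 2)
    ('a', 0, 1)
    ('b', 1, 2)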
- - - """ - r = range(1, len(seq) + 1) - if reverse: - r = reversed(r) - return ( - (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1) - ) - - -class bucket: - """Wrap *iterable* and return an object that buckets it iterable into - child iterables based on a *key* function. - - >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] - >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character - >>> sorted(list(s)) # Get the keys - ['a', 'b', 'c'] - >>> a_iterable = s['a'] - >>> next(a_iterable) - 'a1' - >>> next(a_iterable) - 'a2' - >>> list(s['b']) - ['b1', 'b2', 'b3'] - - The original iterable will be advanced and its items will be cached until - they are used by the child iterables. This may require significant storage. - - By default, attempting to select a bucket to which no items belong will - exhaust the iterable and cache all values. - If you specify a *validator* function, selected buckets will instead be - checked against it. - - >>> from itertools import count - >>> it = count(1, 2) # Infinite sequence of odd numbers - >>> key = lambda x: x % 10 # Bucket by last digit - >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only - >>> s = bucket(it, key=key, validator=validator) - >>> 2 in s - False - >>> list(s[2]) - [] - - """ - - def __init__(self, iterable, key, validator=None): - self._it = iter(iterable) - self._key = key - self._cache = defaultdict(deque) - self._validator = validator or (lambda x: True) - - def __contains__(self, value): - if not self._validator(value): - return False - - try: - item = next(self[value]) - except StopIteration: - return False - else: - self._cache[value].appendleft(item) - - return True - - def _get_values(self, value): - """ - Helper to yield items from the parent iterator that match *value*. - Items that don't match are stored in the local cache as they - are encountered. - """ - while True: - # If we've cached some items that match the target value, emit - # the first one and evict it from the cache. - if self._cache[value]: - yield self._cache[value].popleft() - # Otherwise we need to advance the parent iterator to search for - # a matching item, caching the rest. - else: - while True: - try: - item = next(self._it) - except StopIteration: - return - item_value = self._key(item) - if item_value == value: - yield item - break - elif self._validator(item_value): - self._cache[item_value].append(item) - - def __iter__(self): - for item in self._it: - item_value = self._key(item) - if self._validator(item_value): - self._cache[item_value].append(item) - - yield from self._cache.keys() - - def __getitem__(self, value): - if not self._validator(value): - return iter(()) - - return self._get_values(value) - - -def spy(iterable, n=1): - """Return a 2-tuple with a list containing the first *n* elements of - *iterable*, and an iterator with the same items as *iterable*. - This allows you to "look ahead" at the items in the iterable without - advancing it. 
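    Note that :func:`spy` returns a *new* iterator built with
    :func:`itertools.chain`; the original iterator is advanced while reading
    the head, so continue with the returned one. (For in-place lookahead,
    see :class:`peekable`.)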
- - There is one item in the list by default: - - >>> iterable = 'abcdefg' - >>> head, iterable = spy(iterable) - >>> head - ['a'] - >>> list(iterable) - ['a', 'b', 'c', 'd', 'e', 'f', 'g'] - - You may use unpacking to retrieve items instead of lists: - - >>> (head,), iterable = spy('abcdefg') - >>> head - 'a' - >>> (first, second), iterable = spy('abcdefg', 2) - >>> first - 'a' - >>> second - 'b' - - The number of items requested can be larger than the number of items in - the iterable: - - >>> iterable = [1, 2, 3, 4, 5] - >>> head, iterable = spy(iterable, 10) - >>> head - [1, 2, 3, 4, 5] - >>> list(iterable) - [1, 2, 3, 4, 5] - - """ - it = iter(iterable) - head = take(n, it) - - return head.copy(), chain(head, it) - - -def interleave(*iterables): - """Return a new iterable yielding from each iterable in turn, - until the shortest is exhausted. - - >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7] - - For a version that doesn't terminate after the shortest iterable is - exhausted, see :func:`interleave_longest`. - - """ - return chain.from_iterable(zip(*iterables)) - - -def interleave_longest(*iterables): - """Return a new iterable yielding from each iterable in turn, - skipping any that are exhausted. - - >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8])) - [1, 4, 6, 2, 5, 7, 3, 8] - - This function produces the same output as :func:`roundrobin`, but may - perform better for some inputs (in particular when the number of iterables - is large). - - """ - i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) - return (x for x in i if x is not _marker) - - -def collapse(iterable, base_type=None, levels=None): - """Flatten an iterable with multiple levels of nesting (e.g., a list of - lists of tuples) into non-iterable types. - - >>> iterable = [(1, 2), ([3, 4], [[5], [6]])] - >>> list(collapse(iterable)) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and - will not be collapsed. - - To avoid collapsing other types, specify *base_type*: - - >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']] - >>> list(collapse(iterable, base_type=tuple)) - ['ab', ('cd', 'ef'), 'gh', 'ij'] - - Specify *levels* to stop flattening after a certain level: - - >>> iterable = [('a', ['b']), ('c', ['d'])] - >>> list(collapse(iterable)) # Fully flattened - ['a', 'b', 'c', 'd'] - >>> list(collapse(iterable, levels=1)) # Only one level flattened - ['a', ['b'], 'c', ['d']] - - """ - - def walk(node, level): - if ( - ((levels is not None) and (level > levels)) - or isinstance(node, (str, bytes)) - or ((base_type is not None) and isinstance(node, base_type)) - ): - yield node - return - - try: - tree = iter(node) - except TypeError: - yield node - return - else: - for child in tree: - yield from walk(child, level + 1) - - yield from walk(iterable, 0) - - -def side_effect(func, iterable, chunk_size=None, before=None, after=None): - """Invoke *func* on each item in *iterable* (or on each *chunk_size* group - of items) before yielding the item. - - `func` must be a function that takes a single argument. Its return value - will be discarded. - - *before* and *after* are optional functions that take no arguments. They - will be executed before iteration starts and after it ends, respectively. - - `side_effect` can be used for logging, updating progress bars, or anything - that is not functionally "pure." 
- - Emitting a status message: - - >>> from more_itertools import consume - >>> func = lambda item: print('Received {}'.format(item)) - >>> consume(side_effect(func, range(2))) - Received 0 - Received 1 - - Operating on chunks of items: - - >>> pair_sums = [] - >>> func = lambda chunk: pair_sums.append(sum(chunk)) - >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2)) - [0, 1, 2, 3, 4, 5] - >>> list(pair_sums) - [1, 5, 9] - - Writing to a file-like object: - - >>> from io import StringIO - >>> from more_itertools import consume - >>> f = StringIO() - >>> func = lambda x: print(x, file=f) - >>> before = lambda: print(u'HEADER', file=f) - >>> after = f.close - >>> it = [u'a', u'b', u'c'] - >>> consume(side_effect(func, it, before=before, after=after)) - >>> f.closed - True - - """ - try: - if before is not None: - before() - - if chunk_size is None: - for item in iterable: - func(item) - yield item - else: - for chunk in chunked(iterable, chunk_size): - func(chunk) - yield from chunk - finally: - if after is not None: - after() - - -def sliced(seq, n, strict=False): - """Yield slices of length *n* from the sequence *seq*. - - >>> list(sliced((1, 2, 3, 4, 5, 6), 3)) - [(1, 2, 3), (4, 5, 6)] - - By the default, the last yielded slice will have fewer than *n* elements - if the length of *seq* is not divisible by *n*: - - >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3)) - [(1, 2, 3), (4, 5, 6), (7, 8)] - - If the length of *seq* is not divisible by *n* and *strict* is - ``True``, then ``ValueError`` will be raised before the last - slice is yielded. - - This function will only work for iterables that support slicing. - For non-sliceable iterables, see :func:`chunked`. - - """ - iterator = takewhile(len, (seq[i : i + n] for i in count(0, n))) - if strict: - - def ret(): - for _slice in iterator: - if len(_slice) != n: - raise ValueError("seq is not divisible by n.") - yield _slice - - return iter(ret()) - else: - return iterator - - -def split_at(iterable, pred, maxsplit=-1, keep_separator=False): - """Yield lists of items from *iterable*, where each list is delimited by - an item where callable *pred* returns ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b')) - [['a'], ['c', 'd', 'c'], ['a']] - - >>> list(split_at(range(10), lambda n: n % 2 == 1)) - [[0], [2], [4], [6], [8], []] - - At most *maxsplit* splits are done. If *maxsplit* is not specified or -1, - then there is no limit on the number of splits: - - >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2)) - [[0], [2], [4, 5, 6, 7, 8, 9]] - - By default, the delimiting items are not included in the output. - The include them, set *keep_separator* to ``True``. - - >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True)) - [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']] - - """ - if maxsplit == 0: - yield list(iterable) - return - - buf = [] - it = iter(iterable) - for item in it: - if pred(item): - yield buf - if keep_separator: - yield [item] - if maxsplit == 1: - yield list(it) - return - buf = [] - maxsplit -= 1 - else: - buf.append(item) - yield buf - - -def split_before(iterable, pred, maxsplit=-1): - """Yield lists of items from *iterable*, where each list ends just before - an item for which callable *pred* returns ``True``: - - >>> list(split_before('OneTwo', lambda s: s.isupper())) - [['O', 'n', 'e'], ['T', 'w', 'o']] - - >>> list(split_before(range(10), lambda n: n % 3 == 0)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] - - At most *maxsplit* splits are done. 
-def split_before(iterable, pred, maxsplit=-1):
-    """Yield lists of items from *iterable*, where each list ends just before
-    an item for which callable *pred* returns ``True``:
-
-    >>> list(split_before('OneTwo', lambda s: s.isupper()))
-    [['O', 'n', 'e'], ['T', 'w', 'o']]
-
-    >>> list(split_before(range(10), lambda n: n % 3 == 0))
-    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
-
-    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
-    then there is no limit on the number of splits:
-
-    >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
-    [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
-    """
-    if maxsplit == 0:
-        yield list(iterable)
-        return
-
-    buf = []
-    it = iter(iterable)
-    for item in it:
-        if pred(item) and buf:
-            yield buf
-            if maxsplit == 1:
-                yield [item] + list(it)
-                return
-            buf = []
-            maxsplit -= 1
-        buf.append(item)
-    if buf:
-        yield buf
-
-
-def split_after(iterable, pred, maxsplit=-1):
-    """Yield lists of items from *iterable*, where each list ends with an
-    item where callable *pred* returns ``True``:
-
-    >>> list(split_after('one1two2', lambda s: s.isdigit()))
-    [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
-
-    >>> list(split_after(range(10), lambda n: n % 3 == 0))
-    [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
-
-    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
-    then there is no limit on the number of splits:
-
-    >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
-    [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
-
-    """
-    if maxsplit == 0:
-        yield list(iterable)
-        return
-
-    buf = []
-    it = iter(iterable)
-    for item in it:
-        buf.append(item)
-        if pred(item) and buf:
-            yield buf
-            if maxsplit == 1:
-                yield list(it)
-                return
-            buf = []
-            maxsplit -= 1
-    if buf:
-        yield buf
-
-
-def split_when(iterable, pred, maxsplit=-1):
-    """Split *iterable* into pieces based on the output of *pred*.
-    *pred* should be a function that takes successive pairs of items and
-    returns ``True`` if the iterable should be split in between them.
-
-    For example, to find runs of increasing numbers, split the iterable when
-    element ``i`` is larger than element ``i + 1``:
-
-    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
-    [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
-
-    At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
-    then there is no limit on the number of splits:
-
-    >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
-    ...                 lambda x, y: x > y, maxsplit=2))
-    [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
-
-    """
-    if maxsplit == 0:
-        yield list(iterable)
-        return
-
-    it = iter(iterable)
-    try:
-        cur_item = next(it)
-    except StopIteration:
-        return
-
-    buf = [cur_item]
-    for next_item in it:
-        if pred(cur_item, next_item):
-            yield buf
-            if maxsplit == 1:
-                yield [next_item] + list(it)
-                return
-            buf = []
-            maxsplit -= 1
-
-        buf.append(next_item)
-        cur_item = next_item
-
-    yield buf
-
-
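The three variants above differ only in where the delimiter lands;
split_when is the most general, since it inspects adjacent pairs. A small
sketch that breaks a sorted series of timestamps into sessions wherever
the gap exceeds a threshold (the data and the 5-unit threshold are
hypothetical):

    from more_itertools import split_when

    times = [1, 2, 3, 10, 11, 30]
    sessions = list(split_when(times, lambda a, b: b - a > 5))
    assert sessions == [[1, 2, 3], [10, 11], [30]]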
-def split_into(iterable, sizes):
-    """Yield a list of sequential items from *iterable* of length 'n' for
-    each integer 'n' in *sizes*.
-
-    >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
-    [[1], [2, 3], [4, 5, 6]]
-
-    If the sum of *sizes* is smaller than the length of *iterable*, then the
-    remaining items of *iterable* will not be returned.
-
-    >>> list(split_into([1,2,3,4,5,6], [2,3]))
-    [[1, 2], [3, 4, 5]]
-
-    If the sum of *sizes* is larger than the length of *iterable*, fewer
-    items will be returned in the iteration that overruns *iterable* and
-    further lists will be empty:
-
-    >>> list(split_into([1,2,3,4], [1,2,3,4]))
-    [[1], [2, 3], [4], []]
-
-    When a ``None`` object is encountered in *sizes*, the returned list will
-    contain items up to the end of *iterable* the same way that
-    :func:`itertools.islice` does:
-
-    >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
-    [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
-
-    :func:`split_into` can be useful for grouping a series of items where
-    the sizes of the groups are not uniform. An example would be a row from
-    a table in which several columns represent elements of the same feature
-    (e.g. a point represented by x, y, z), but the format is not the same
-    for all columns.
-    """
-    # convert the iterable argument into an iterator so its contents can
-    # be consumed by islice in case it is a generator
-    it = iter(iterable)
-
-    for size in sizes:
-        if size is None:
-            yield list(it)
-            return
-        else:
-            yield list(islice(it, size))
-
-
-def padded(iterable, fillvalue=None, n=None, next_multiple=False):
-    """Yield the elements from *iterable*, followed by *fillvalue*, such that
-    at least *n* items are emitted.
-
-    >>> list(padded([1, 2, 3], '?', 5))
-    [1, 2, 3, '?', '?']
-
-    If *next_multiple* is ``True``, *fillvalue* will be emitted until the
-    number of items emitted is a multiple of *n*::
-
-    >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
-    [1, 2, 3, 4, None, None]
-
-    If *n* is ``None``, *fillvalue* will be emitted indefinitely.
-
-    """
-    it = iter(iterable)
-    if n is None:
-        yield from chain(it, repeat(fillvalue))
-    elif n < 1:
-        raise ValueError('n must be at least 1')
-    else:
-        item_count = 0
-        for item in it:
-            yield item
-            item_count += 1
-
-        remaining = (n - item_count) % n if next_multiple else n - item_count
-        for _ in range(remaining):
-            yield fillvalue
-
-
-def repeat_last(iterable, default=None):
-    """After the *iterable* is exhausted, keep yielding its last element.
-
-    >>> list(islice(repeat_last(range(3)), 5))
-    [0, 1, 2, 2, 2]
-
-    If the iterable is empty, yield *default* forever::
-
-    >>> list(islice(repeat_last(range(0), 42), 5))
-    [42, 42, 42, 42, 42]
-
-    """
-    item = _marker
-    for item in iterable:
-        yield item
-    final = default if item is _marker else item
-    yield from repeat(final)
-
-
-def distribute(n, iterable):
-    """Distribute the items from *iterable* among *n* smaller iterables.
-
-    >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
-    >>> list(group_1)
-    [1, 3, 5]
-    >>> list(group_2)
-    [2, 4, 6]
-
-    If the length of *iterable* is not evenly divisible by *n*, then the
-    length of the returned iterables will not be identical:
-
-    >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
-    >>> [list(c) for c in children]
-    [[1, 4, 7], [2, 5], [3, 6]]
-
-    If the length of *iterable* is smaller than *n*, then the last returned
-    iterables will be empty:
-
-    >>> children = distribute(5, [1, 2, 3])
-    >>> [list(c) for c in children]
-    [[1], [2], [3], [], []]
-
-    This function uses :func:`itertools.tee` and may require significant
-    storage. If you need the order of items in the smaller iterables to
-    match the original iterable, see :func:`divide`.
-
-    """
-    if n < 1:
-        raise ValueError('n must be at least 1')
-
-    children = tee(iterable, n)
-    return [islice(it, index, None, n) for index, it in enumerate(children)]
-
-
-def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
-    """Yield tuples whose elements are offset from *iterable*.
-    The amount by which the `i`-th item in each tuple is offset is given by
-    the `i`-th item in *offsets*.
-
-    >>> list(stagger([0, 1, 2, 3]))
-    [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
-    >>> list(stagger(range(8), offsets=(0, 2, 4)))
-    [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
-
-    By default, the sequence will end when the final element of a tuple is
-    the last item in the iterable.
To continue until the first element of a tuple - is the last item in the iterable, set *longest* to ``True``:: - - >>> list(stagger([0, 1, 2, 3], longest=True)) - [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. - - """ - children = tee(iterable, len(offsets)) - - return zip_offset( - *children, offsets=offsets, longest=longest, fillvalue=fillvalue - ) - - -class UnequalIterablesError(ValueError): - def __init__(self, details=None): - msg = 'Iterables have different lengths' - if details is not None: - msg += (': index 0 has length {}; index {} has length {}').format( - *details - ) - - super().__init__(msg) - - -def _zip_equal_generator(iterables): - for combo in zip_longest(*iterables, fillvalue=_marker): - for val in combo: - if val is _marker: - raise UnequalIterablesError() - yield combo - - -def zip_equal(*iterables): - """``zip`` the input *iterables* together, but raise - ``UnequalIterablesError`` if they aren't all the same length. - - >>> it_1 = range(3) - >>> it_2 = iter('abc') - >>> list(zip_equal(it_1, it_2)) - [(0, 'a'), (1, 'b'), (2, 'c')] - - >>> it_1 = range(3) - >>> it_2 = iter('abcd') - >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - more_itertools.more.UnequalIterablesError: Iterables have different - lengths - - """ - if hexversion >= 0x30A00A6: - warnings.warn( - ( - 'zip_equal will be removed in a future version of ' - 'more-itertools. Use the builtin zip function with ' - 'strict=True instead.' - ), - DeprecationWarning, - ) - # Check whether the iterables are all the same size. - try: - first_size = len(iterables[0]) - for i, it in enumerate(iterables[1:], 1): - size = len(it) - if size != first_size: - break - else: - # If we didn't break out, we can use the built-in zip. - return zip(*iterables) - - # If we did break out, there was a mismatch. - raise UnequalIterablesError(details=(first_size, i, size)) - # If any one of the iterables didn't have a length, start reading - # them until one runs out. - except TypeError: - return _zip_equal_generator(iterables) - - -def zip_offset(*iterables, offsets, longest=False, fillvalue=None): - """``zip`` the input *iterables* together, but offset the `i`-th iterable - by the `i`-th item in *offsets*. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1))) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')] - - This can be used as a lightweight alternative to SciPy or pandas to analyze - data sets in which some series have a lead or lag relationship. - - By default, the sequence will end when the shortest iterable is exhausted. - To continue until the longest iterable is exhausted, set *longest* to - ``True``. - - >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True)) - [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')] - - By default, ``None`` will be used to replace offsets beyond the end of the - sequence. Specify *fillvalue* to use some other value. 
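    A sketch of the lead/lag analysis mentioned above, pairing each value
    in a series with its successor (the series is hypothetical):

    >>> series = [10, 11, 13, 16]
    >>> list(zip_offset(series, series, offsets=(0, 1)))
    [(10, 11), (11, 13), (13, 16)]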
- - """ - if len(iterables) != len(offsets): - raise ValueError("Number of iterables and offsets didn't match") - - staggered = [] - for it, n in zip(iterables, offsets): - if n < 0: - staggered.append(chain(repeat(fillvalue, -n), it)) - elif n > 0: - staggered.append(islice(it, n, None)) - else: - staggered.append(it) - - if longest: - return zip_longest(*staggered, fillvalue=fillvalue) - - return zip(*staggered) - - -def sort_together(iterables, key_list=(0,), key=None, reverse=False): - """Return the input iterables sorted together, with *key_list* as the - priority for sorting. All iterables are trimmed to the length of the - shortest one. - - This can be used like the sorting function in a spreadsheet. If each - iterable represents a column of data, the key list determines which - columns are used for sorting. - - By default, all iterables are sorted using the ``0``-th iterable:: - - >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')] - >>> sort_together(iterables) - [(1, 2, 3, 4), ('d', 'c', 'b', 'a')] - - Set a different key list to sort according to another iterable. - Specifying multiple keys dictates how ties are broken:: - - >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')] - >>> sort_together(iterables, key_list=(1, 2)) - [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')] - - To sort by a function of the elements of the iterable, pass a *key* - function. Its arguments are the elements of the iterables corresponding to - the key list:: - - >>> names = ('a', 'b', 'c') - >>> lengths = (1, 2, 3) - >>> widths = (5, 2, 1) - >>> def area(length, width): - ... return length * width - >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area) - [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)] - - Set *reverse* to ``True`` to sort in descending order. - - >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) - [(3, 2, 1), ('a', 'b', 'c')] - - """ - if key is None: - # if there is no key function, the key argument to sorted is an - # itemgetter - key_argument = itemgetter(*key_list) - else: - # if there is a key function, call it with the items at the offsets - # specified by the key function as arguments - key_list = list(key_list) - if len(key_list) == 1: - # if key_list contains a single item, pass the item at that offset - # as the only argument to the key function - key_offset = key_list[0] - key_argument = lambda zipped_items: key(zipped_items[key_offset]) - else: - # if key_list contains multiple items, use itemgetter to return a - # tuple of items, which we pass as *args to the key function - get_key_items = itemgetter(*key_list) - key_argument = lambda zipped_items: key( - *get_key_items(zipped_items) - ) - - return list( - zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) - ) - - -def unzip(iterable): - """The inverse of :func:`zip`, this function disaggregates the elements - of the zipped *iterable*. - - The ``i``-th iterable contains the ``i``-th element from each element - of the zipped iterable. The first element is used to to determine the - length of the remaining elements. - - >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> letters, numbers = unzip(iterable) - >>> list(letters) - ['a', 'b', 'c', 'd'] - >>> list(numbers) - [1, 2, 3, 4] - - This is similar to using ``zip(*iterable)``, but it avoids reading - *iterable* into memory. Note, however, that this function uses - :func:`itertools.tee` and thus may require significant storage. - - """ - head, iterable = spy(iter(iterable)) - if not head: - # empty iterable, e.g. 
zip([], [], []) - return () - # spy returns a one-length iterable as head - head = head[0] - iterables = tee(iterable, len(head)) - - def itemgetter(i): - def getter(obj): - try: - return obj[i] - except IndexError: - # basically if we have an iterable like - # iter([(1, 2, 3), (4, 5), (6,)]) - # the second unzipped iterable would fail at the third tuple - # since it would try to access tup[1] - # same with the third unzipped iterable and the second tuple - # to support these "improperly zipped" iterables, - # we create a custom itemgetter - # which just stops the unzipped iterables - # at first length mismatch - raise StopIteration - - return getter - - return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) - - -def divide(n, iterable): - """Divide the elements from *iterable* into *n* parts, maintaining - order. - - >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6]) - >>> list(group_1) - [1, 2, 3] - >>> list(group_2) - [4, 5, 6] - - If the length of *iterable* is not evenly divisible by *n*, then the - length of the returned iterables will not be identical: - - >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7]) - >>> [list(c) for c in children] - [[1, 2, 3], [4, 5], [6, 7]] - - If the length of the iterable is smaller than n, then the last returned - iterables will be empty: - - >>> children = divide(5, [1, 2, 3]) - >>> [list(c) for c in children] - [[1], [2], [3], [], []] - - This function will exhaust the iterable before returning and may require - significant storage. If order is not important, see :func:`distribute`, - which does not first pull the iterable into memory. - - """ - if n < 1: - raise ValueError('n must be at least 1') - - try: - iterable[:0] - except TypeError: - seq = tuple(iterable) - else: - seq = iterable - - q, r = divmod(len(seq), n) - - ret = [] - stop = 0 - for i in range(1, n + 1): - start = stop - stop += q + 1 if i <= r else q - ret.append(iter(seq[start:stop])) - - return ret - - -def always_iterable(obj, base_type=(str, bytes)): - """If *obj* is iterable, return an iterator over its items:: - - >>> obj = (1, 2, 3) - >>> list(always_iterable(obj)) - [1, 2, 3] - - If *obj* is not iterable, return a one-item iterable containing *obj*:: - - >>> obj = 1 - >>> list(always_iterable(obj)) - [1] - - If *obj* is ``None``, return an empty iterable: - - >>> obj = None - >>> list(always_iterable(None)) - [] - - By default, binary and text strings are not considered iterable:: - - >>> obj = 'foo' - >>> list(always_iterable(obj)) - ['foo'] - - If *base_type* is set, objects for which ``isinstance(obj, base_type)`` - returns ``True`` won't be considered iterable. - - >>> obj = {'a': 1} - >>> list(always_iterable(obj)) # Iterate over the dict's keys - ['a'] - >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit - [{'a': 1}] - - Set *base_type* to ``None`` to avoid any special handling and treat objects - Python considers iterable as iterable: - - >>> obj = 'foo' - >>> list(always_iterable(obj, base_type=None)) - ['f', 'o', 'o'] - """ - if obj is None: - return iter(()) - - if (base_type is not None) and isinstance(obj, base_type): - return iter((obj,)) - - try: - return iter(obj) - except TypeError: - return iter((obj,)) - - -def adjacent(predicate, iterable, distance=1): - """Return an iterable over `(bool, item)` tuples where the `item` is - drawn from *iterable* and the `bool` indicates whether - that item satisfies the *predicate* or is adjacent to an item that does. 
- - For example, to find whether items are adjacent to a ``3``:: - - >>> list(adjacent(lambda x: x == 3, range(6))) - [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] - - Set *distance* to change what counts as adjacent. For example, to find - whether items are two places away from a ``3``: - - >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) - [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] - - This is useful for contextualizing the results of a search function. - For example, a code comparison tool might want to identify lines that - have changed, but also surrounding lines to give the viewer of the diff - context. - - The predicate function will only be called once for each item in the - iterable. - - See also :func:`groupby_transform`, which can be used with this function - to group ranges of items with the same `bool` value. - - """ - # Allow distance=0 mainly for testing that it reproduces results with map() - if distance < 0: - raise ValueError('distance must be at least 0') - - i1, i2 = tee(iterable) - padding = [False] * distance - selected = chain(padding, map(predicate, i1), padding) - adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) - return zip(adjacent_to_selected, i2) - - -def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None): - """An extension of :func:`itertools.groupby` that can apply transformations - to the grouped data. - - * *keyfunc* is a function computing a key value for each item in *iterable* - * *valuefunc* is a function that transforms the individual items from - *iterable* after grouping - * *reducefunc* is a function that transforms each group of items - - >>> iterable = 'aAAbBBcCC' - >>> keyfunc = lambda k: k.upper() - >>> valuefunc = lambda v: v.lower() - >>> reducefunc = lambda g: ''.join(g) - >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc)) - [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')] - - Each optional argument defaults to an identity function if not specified. - - :func:`groupby_transform` is useful when grouping elements of an iterable - using a separate iterable as the key. To do this, :func:`zip` the iterables - and pass a *keyfunc* that extracts the first element and a *valuefunc* - that extracts the second element:: - - >>> from operator import itemgetter - >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3] - >>> values = 'abcdefghi' - >>> iterable = zip(keys, values) - >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1)) - >>> [(k, ''.join(g)) for k, g in grouper] - [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')] - - Note that the order of items in the iterable is significant. - Only adjacent items are grouped together, so if you don't want any - duplicate groups, you should sort the iterable by the key function. - - """ - ret = groupby(iterable, keyfunc) - if valuefunc: - ret = ((k, map(valuefunc, g)) for k, g in ret) - if reducefunc: - ret = ((k, reducefunc(g)) for k, g in ret) - - return ret - - -class numeric_range(abc.Sequence, abc.Hashable): - """An extension of the built-in ``range()`` function whose arguments can - be any orderable numeric type. - - With only *stop* specified, *start* defaults to ``0`` and *step* - defaults to ``1``. The output items will match the type of *stop*: - - >>> list(numeric_range(3.5)) - [0.0, 1.0, 2.0, 3.0] - - With only *start* and *stop* specified, *step* defaults to ``1``. 
The - output items will match the type of *start*: - - >>> from decimal import Decimal - >>> start = Decimal('2.1') - >>> stop = Decimal('5.1') - >>> list(numeric_range(start, stop)) - [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')] - - With *start*, *stop*, and *step* specified the output items will match - the type of ``start + step``: - - >>> from fractions import Fraction - >>> start = Fraction(1, 2) # Start at 1/2 - >>> stop = Fraction(5, 2) # End at 5/2 - >>> step = Fraction(1, 2) # Count by 1/2 - >>> list(numeric_range(start, stop, step)) - [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)] - - If *step* is zero, ``ValueError`` is raised. Negative steps are supported: - - >>> list(numeric_range(3, -1, -1.0)) - [3.0, 2.0, 1.0, 0.0] - - Be aware of the limitations of floating point numbers; the representation - of the yielded numbers may be surprising. - - ``datetime.datetime`` objects can be used for *start* and *stop*, if *step* - is a ``datetime.timedelta`` object: - - >>> import datetime - >>> start = datetime.datetime(2019, 1, 1) - >>> stop = datetime.datetime(2019, 1, 3) - >>> step = datetime.timedelta(days=1) - >>> items = iter(numeric_range(start, stop, step)) - >>> next(items) - datetime.datetime(2019, 1, 1, 0, 0) - >>> next(items) - datetime.datetime(2019, 1, 2, 0, 0) - - """ - - _EMPTY_HASH = hash(range(0, 0)) - - def __init__(self, *args): - argc = len(args) - if argc == 1: - (self._stop,) = args - self._start = type(self._stop)(0) - self._step = type(self._stop - self._start)(1) - elif argc == 2: - self._start, self._stop = args - self._step = type(self._stop - self._start)(1) - elif argc == 3: - self._start, self._stop, self._step = args - elif argc == 0: - raise TypeError( - 'numeric_range expected at least ' - '1 argument, got {}'.format(argc) - ) - else: - raise TypeError( - 'numeric_range expected at most ' - '3 arguments, got {}'.format(argc) - ) - - self._zero = type(self._step)(0) - if self._step == self._zero: - raise ValueError('numeric_range() arg 3 must not be zero') - self._growing = self._step > self._zero - self._init_len() - - def __bool__(self): - if self._growing: - return self._start < self._stop - else: - return self._start > self._stop - - def __contains__(self, elem): - if self._growing: - if self._start <= elem < self._stop: - return (elem - self._start) % self._step == self._zero - else: - if self._start >= elem > self._stop: - return (self._start - elem) % (-self._step) == self._zero - - return False - - def __eq__(self, other): - if isinstance(other, numeric_range): - empty_self = not bool(self) - empty_other = not bool(other) - if empty_self or empty_other: - return empty_self and empty_other # True if both empty - else: - return ( - self._start == other._start - and self._step == other._step - and self._get_by_index(-1) == other._get_by_index(-1) - ) - else: - return False - - def __getitem__(self, key): - if isinstance(key, int): - return self._get_by_index(key) - elif isinstance(key, slice): - step = self._step if key.step is None else key.step * self._step - - if key.start is None or key.start <= -self._len: - start = self._start - elif key.start >= self._len: - start = self._stop - else: # -self._len < key.start < self._len - start = self._get_by_index(key.start) - - if key.stop is None or key.stop >= self._len: - stop = self._stop - elif key.stop <= -self._len: - stop = self._start - else: # -self._len < key.stop < self._len - stop = self._get_by_index(key.stop) - - return numeric_range(start, stop, step) - else: - raise 
TypeError( - 'numeric range indices must be ' - 'integers or slices, not {}'.format(type(key).__name__) - ) - - def __hash__(self): - if self: - return hash((self._start, self._get_by_index(-1), self._step)) - else: - return self._EMPTY_HASH - - def __iter__(self): - values = (self._start + (n * self._step) for n in count()) - if self._growing: - return takewhile(partial(gt, self._stop), values) - else: - return takewhile(partial(lt, self._stop), values) - - def __len__(self): - return self._len - - def _init_len(self): - if self._growing: - start = self._start - stop = self._stop - step = self._step - else: - start = self._stop - stop = self._start - step = -self._step - distance = stop - start - if distance <= self._zero: - self._len = 0 - else: # distance > 0 and step > 0: regular euclidean division - q, r = divmod(distance, step) - self._len = int(q) + int(r != self._zero) - - def __reduce__(self): - return numeric_range, (self._start, self._stop, self._step) - - def __repr__(self): - if self._step == 1: - return "numeric_range({}, {})".format( - repr(self._start), repr(self._stop) - ) - else: - return "numeric_range({}, {}, {})".format( - repr(self._start), repr(self._stop), repr(self._step) - ) - - def __reversed__(self): - return iter( - numeric_range( - self._get_by_index(-1), self._start - self._step, -self._step - ) - ) - - def count(self, value): - return int(value in self) - - def index(self, value): - if self._growing: - if self._start <= value < self._stop: - q, r = divmod(value - self._start, self._step) - if r == self._zero: - return int(q) - else: - if self._start >= value > self._stop: - q, r = divmod(self._start - value, -self._step) - if r == self._zero: - return int(q) - - raise ValueError("{} is not in numeric range".format(value)) - - def _get_by_index(self, i): - if i < 0: - i += self._len - if i < 0 or i >= self._len: - raise IndexError("numeric range object index out of range") - return self._start + i * self._step - - -def count_cycle(iterable, n=None): - """Cycle through the items from *iterable* up to *n* times, yielding - the number of completed cycles along with each item. If *n* is omitted the - process repeats indefinitely. - - >>> list(count_cycle('AB', 3)) - [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] - - """ - iterable = tuple(iterable) - if not iterable: - return iter(()) - counter = count() if n is None else range(n) - return ((i, item) for i in counter for item in iterable) - - -def mark_ends(iterable): - """Yield 3-tuples of the form ``(is_first, is_last, item)``. - - >>> list(mark_ends('ABC')) - [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')] - - Use this when looping over an iterable to take special action on its first - and/or last items: - - >>> iterable = ['Header', 100, 200, 'Footer'] - >>> total = 0 - >>> for is_first, is_last, item in mark_ends(iterable): - ... if is_first: - ... continue # Skip the header - ... if is_last: - ... continue # Skip the footer - ... total += item - >>> print(total) - 300 - """ - it = iter(iterable) - - try: - b = next(it) - except StopIteration: - return - - try: - for i in count(): - a = b - b = next(it) - yield i == 0, False, a - - except StopIteration: - yield i == 0, True, a - - -def locate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``. 
-
-    *pred* defaults to :func:`bool`, which will select truthy items:
-
-    >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
-    [1, 2, 4]
-
-    Set *pred* to a custom function to, e.g., find the indexes for a
-    particular item.
-
-    >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
-    [1, 3]
-
-    If *window_size* is given, then the *pred* function will be called with
-    that many items. This enables searching for sub-sequences:
-
-    >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
-    >>> pred = lambda *args: args == (1, 2, 3)
-    >>> list(locate(iterable, pred=pred, window_size=3))
-    [1, 5, 9]
-
-    Use with :func:`seekable` to find indexes and then retrieve the
-    associated items:
-
-    >>> from itertools import count
-    >>> from more_itertools import seekable
-    >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
-    >>> it = seekable(source)
-    >>> pred = lambda x: x > 100
-    >>> indexes = locate(it, pred=pred)
-    >>> i = next(indexes)
-    >>> it.seek(i)
-    >>> next(it)
-    106
-
-    """
-    if window_size is None:
-        return compress(count(), map(pred, iterable))
-
-    if window_size < 1:
-        raise ValueError('window size must be at least 1')
-
-    it = windowed(iterable, window_size, fillvalue=_marker)
-    return compress(count(), starmap(pred, it))
-
-
-def lstrip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the beginning
-    for which *pred* returns ``True``.
-
-    For example, to remove a set of items from the start of an iterable:
-
-    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-    >>> pred = lambda x: x in {None, False, ''}
-    >>> list(lstrip(iterable, pred))
-    [1, 2, None, 3, False, None]
-
-    This function is analogous to :func:`str.lstrip`, and is essentially
-    a wrapper for :func:`itertools.dropwhile`.
-
-    """
-    return dropwhile(pred, iterable)
-
-
-def rstrip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the end
-    for which *pred* returns ``True``.
-
-    For example, to remove a set of items from the end of an iterable:
-
-    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-    >>> pred = lambda x: x in {None, False, ''}
-    >>> list(rstrip(iterable, pred))
-    [None, False, None, 1, 2, None, 3]
-
-    This function is analogous to :func:`str.rstrip`.
-
-    """
-    cache = []
-    cache_append = cache.append
-    cache_clear = cache.clear
-    for x in iterable:
-        if pred(x):
-            cache_append(x)
-        else:
-            yield from cache
-            cache_clear()
-            yield x
-
-
-def strip(iterable, pred):
-    """Yield the items from *iterable*, but strip any from the
-    beginning and end for which *pred* returns ``True``.
-
-    For example, to remove a set of items from both ends of an iterable:
-
-    >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
-    >>> pred = lambda x: x in {None, False, ''}
-    >>> list(strip(iterable, pred))
-    [1, 2, None, 3]
-
-    This function is analogous to :func:`str.strip`.
-
-    """
-    return rstrip(lstrip(iterable, pred), pred)
-
-
-class islice_extended:
-    """An extension of :func:`itertools.islice` that supports negative values
-    for *stop*, *start*, and *step*.
-
-    >>> iterable = iter('abcdefgh')
-    >>> list(islice_extended(iterable, -4, -1))
-    ['e', 'f', 'g']
-
-    Slices with negative values require some caching of *iterable*, but this
-    function takes care to minimize the amount of memory required.
- - For example, you can use a negative step with an infinite iterator: - - >>> from itertools import count - >>> list(islice_extended(count(), 110, 99, -2)) - [110, 108, 106, 104, 102, 100] - - You can also use slice notation directly: - - >>> iterable = map(str, count()) - >>> it = islice_extended(iterable)[10:20:2] - >>> list(it) - ['10', '12', '14', '16', '18'] - - """ - - def __init__(self, iterable, *args): - it = iter(iterable) - if args: - self._iterable = _islice_helper(it, slice(*args)) - else: - self._iterable = it - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterable) - - def __getitem__(self, key): - if isinstance(key, slice): - return islice_extended(_islice_helper(self._iterable, key)) - - raise TypeError('islice_extended.__getitem__ argument must be a slice') - - -def _islice_helper(it, s): - start = s.start - stop = s.stop - if s.step == 0: - raise ValueError('step argument must be a non-zero integer or None.') - step = s.step or 1 - - if step > 0: - start = 0 if (start is None) else start - - if start < 0: - # Consume all but the last -start items - cache = deque(enumerate(it, 1), maxlen=-start) - len_iter = cache[-1][0] if cache else 0 - - # Adjust start to be positive - i = max(len_iter + start, 0) - - # Adjust stop to be positive - if stop is None: - j = len_iter - elif stop >= 0: - j = min(stop, len_iter) - else: - j = max(len_iter + stop, 0) - - # Slice the cache - n = j - i - if n <= 0: - return - - for index, item in islice(cache, 0, n, step): - yield item - elif (stop is not None) and (stop < 0): - # Advance to the start position - next(islice(it, start, start), None) - - # When stop is negative, we have to carry -stop items while - # iterating - cache = deque(islice(it, -stop), maxlen=-stop) - - for index, item in enumerate(it): - cached_item = cache.popleft() - if index % step == 0: - yield cached_item - cache.append(item) - else: - # When both start and stop are positive we have the normal case - yield from islice(it, start, stop, step) - else: - start = -1 if (start is None) else start - - if (stop is not None) and (stop < 0): - # Consume all but the last items - n = -stop - 1 - cache = deque(enumerate(it, 1), maxlen=n) - len_iter = cache[-1][0] if cache else 0 - - # If start and stop are both negative they are comparable and - # we can just slice. Otherwise we can adjust start to be negative - # and then slice. - if start < 0: - i, j = start, stop - else: - i, j = min(start - len_iter, -1), None - - for index, item in list(cache)[i:j:step]: - yield item - else: - # Advance to the stop position - if stop is not None: - m = stop + 1 - next(islice(it, m, m), None) - - # stop is positive, so if start is negative they are not comparable - # and we need the rest of the items. - if start < 0: - i = start - n = None - # stop is None and start is positive, so we just need items up to - # the start index. - elif stop is None: - i = None - n = start + 1 - # Both stop and start are positive, so they are comparable. - else: - i = None - n = start - stop - if n <= 0: - return - - cache = list(islice(it, n)) - - yield from cache[i::step] - - -def always_reversible(iterable): - """An extension of :func:`reversed` that supports all iterables, not - just those which implement the ``Reversible`` or ``Sequence`` protocols. - - >>> print(*always_reversible(x for x in range(3))) - 2 1 0 - - If the iterable is already reversible, this function returns the - result of :func:`reversed()`. 
If the iterable is not reversible,
-    this function will cache the remaining items in the iterable and yield
-    them in reverse order, which may require significant storage.
-    """
-    try:
-        return reversed(iterable)
-    except TypeError:
-        return reversed(list(iterable))
-
-
-def consecutive_groups(iterable, ordering=lambda x: x):
-    """Yield groups of consecutive items using :func:`itertools.groupby`.
-    The *ordering* function determines whether two items are adjacent by
-    returning their position.
-
-    By default, the ordering function is the identity function. This is
-    suitable for finding runs of numbers:
-
-    >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
-    >>> for group in consecutive_groups(iterable):
-    ...     print(list(group))
-    [1]
-    [10, 11, 12]
-    [20]
-    [30, 31, 32, 33]
-    [40]
-
-    For finding runs of adjacent letters, try using the :meth:`index` method
-    of a string of letters:
-
-    >>> from string import ascii_lowercase
-    >>> iterable = 'abcdfgilmnop'
-    >>> ordering = ascii_lowercase.index
-    >>> for group in consecutive_groups(iterable, ordering):
-    ...     print(list(group))
-    ['a', 'b', 'c', 'd']
-    ['f', 'g']
-    ['i']
-    ['l', 'm', 'n', 'o', 'p']
-
-    Each group of consecutive items is an iterator that shares its source
-    with *iterable*. When an output group is advanced, the previous group
-    is no longer available unless its elements are copied (e.g., into a
-    ``list``).
-
-    >>> iterable = [1, 2, 11, 12, 21, 22]
-    >>> saved_groups = []
-    >>> for group in consecutive_groups(iterable):
-    ...     saved_groups.append(list(group))  # Copy group elements
-    >>> saved_groups
-    [[1, 2], [11, 12], [21, 22]]
-
-    """
-    for k, g in groupby(
-        enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
-    ):
-        yield map(itemgetter(1), g)
-
-
-def difference(iterable, func=sub, *, initial=None):
-    """This function is the inverse of :func:`itertools.accumulate`. By
-    default it will compute the first difference of *iterable* using
-    :func:`operator.sub`:
-
-    >>> from itertools import accumulate
-    >>> iterable = accumulate([0, 1, 2, 3, 4])  # produces 0, 1, 3, 6, 10
-    >>> list(difference(iterable))
-    [0, 1, 2, 3, 4]
-
-    *func* defaults to :func:`operator.sub`, but other functions can be
-    specified. They will be applied as follows::
-
-        A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
-
-    For example, to do progressive division:
-
-    >>> iterable = [1, 2, 6, 24, 120]
-    >>> func = lambda x, y: x // y
-    >>> list(difference(iterable, func))
-    [1, 2, 3, 4, 5]
-
-    If the *initial* keyword is set, the first element will be skipped when
-    computing successive differences.
-
-    >>> it = [10, 11, 13, 16]  # from accumulate([1, 2, 3], initial=10)
-    >>> list(difference(it, initial=10))
-    [1, 2, 3]
-
-    """
-    a, b = tee(iterable)
-    try:
-        first = [next(b)]
-    except StopIteration:
-        return iter([])
-
-    if initial is not None:
-        first = []
-
-    return chain(first, starmap(func, zip(b, a)))
-
-
-class SequenceView(Sequence):
-    """Return a read-only view of the sequence object *target*.
-
-    :class:`SequenceView` objects are analogous to Python's built-in
-    "dictionary view" types. They provide a dynamic view of a sequence's
-    items, meaning that when the sequence updates, so does the view.
-
-    >>> seq = ['0', '1', '2']
-    >>> view = SequenceView(seq)
-    >>> view
-    SequenceView(['0', '1', '2'])
-    >>> seq.append('3')
-    >>> view
-    SequenceView(['0', '1', '2', '3'])
-
-    Sequence views support indexing, slicing, and length queries.
They act - like the underlying sequence, except they don't allow assignment: - - >>> view[1] - '1' - >>> view[1:-1] - ['1', '2'] - >>> len(view) - 4 - - Sequence views are useful as an alternative to copying, as they don't - require (much) extra storage. - - """ - - def __init__(self, target): - if not isinstance(target, Sequence): - raise TypeError - self._target = target - - def __getitem__(self, index): - return self._target[index] - - def __len__(self): - return len(self._target) - - def __repr__(self): - return '{}({})'.format(self.__class__.__name__, repr(self._target)) - - -class seekable: - """Wrap an iterator to allow for seeking backward and forward. This - progressively caches the items in the source iterable so they can be - re-visited. - - Call :meth:`seek` with an index to seek to that position in the source - iterable. - - To "reset" an iterator, seek to ``0``: - - >>> from itertools import count - >>> it = seekable((str(n) for n in count())) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> it.seek(0) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> next(it) - '3' - - You can also seek forward: - - >>> it = seekable((str(n) for n in range(20))) - >>> it.seek(10) - >>> next(it) - '10' - >>> it.seek(20) # Seeking past the end of the source isn't a problem - >>> list(it) - [] - >>> it.seek(0) # Resetting works even after hitting the end - >>> next(it), next(it), next(it) - ('0', '1', '2') - - Call :meth:`peek` to look ahead one item without advancing the iterator: - - >>> it = seekable('1234') - >>> it.peek() - '1' - >>> list(it) - ['1', '2', '3', '4'] - >>> it.peek(default='empty') - 'empty' - - Before the iterator is at its end, calling :func:`bool` on it will return - ``True``. After it will return ``False``: - - >>> it = seekable('5678') - >>> bool(it) - True - >>> list(it) - ['5', '6', '7', '8'] - >>> bool(it) - False - - You may view the contents of the cache with the :meth:`elements` method. - That returns a :class:`SequenceView`, a view that updates automatically: - - >>> it = seekable((str(n) for n in range(10))) - >>> next(it), next(it), next(it) - ('0', '1', '2') - >>> elements = it.elements() - >>> elements - SequenceView(['0', '1', '2']) - >>> next(it) - '3' - >>> elements - SequenceView(['0', '1', '2', '3']) - - By default, the cache grows as the source iterable progresses, so beware of - wrapping very large or infinite iterables. Supply *maxlen* to limit the - size of the cache (this of course limits how far back you can seek). 
- - >>> from itertools import count - >>> it = seekable((str(n) for n in count()), maxlen=2) - >>> next(it), next(it), next(it), next(it) - ('0', '1', '2', '3') - >>> list(it.elements()) - ['2', '3'] - >>> it.seek(0) - >>> next(it), next(it), next(it), next(it) - ('2', '3', '4', '5') - >>> next(it) - '6' - - """ - - def __init__(self, iterable, maxlen=None): - self._source = iter(iterable) - if maxlen is None: - self._cache = [] - else: - self._cache = deque([], maxlen) - self._index = None - - def __iter__(self): - return self - - def __next__(self): - if self._index is not None: - try: - item = self._cache[self._index] - except IndexError: - self._index = None - else: - self._index += 1 - return item - - item = next(self._source) - self._cache.append(item) - return item - - def __bool__(self): - try: - self.peek() - except StopIteration: - return False - return True - - def peek(self, default=_marker): - try: - peeked = next(self) - except StopIteration: - if default is _marker: - raise - return default - if self._index is None: - self._index = len(self._cache) - self._index -= 1 - return peeked - - def elements(self): - return SequenceView(self._cache) - - def seek(self, index): - self._index = index - remainder = index - len(self._cache) - if remainder > 0: - consume(self, remainder) - - -class run_length: - """ - :func:`run_length.encode` compresses an iterable with run-length encoding. - It yields groups of repeated items with the count of how many times they - were repeated: - - >>> uncompressed = 'abbcccdddd' - >>> list(run_length.encode(uncompressed)) - [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - - :func:`run_length.decode` decompresses an iterable that was previously - compressed with run-length encoding. It yields the items of the - decompressed iterable: - - >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] - >>> list(run_length.decode(compressed)) - ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd'] - - """ - - @staticmethod - def encode(iterable): - return ((k, ilen(g)) for k, g in groupby(iterable)) - - @staticmethod - def decode(iterable): - return chain.from_iterable(repeat(k, n) for k, n in iterable) - - -def exactly_n(iterable, n, predicate=bool): - """Return ``True`` if exactly ``n`` items in the iterable are ``True`` - according to the *predicate* function. - - >>> exactly_n([True, True, False], 2) - True - >>> exactly_n([True, True, False], 1) - False - >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3) - True - - The iterable will be advanced until ``n + 1`` truthy items are encountered, - so avoid calling it on infinite iterables. - - """ - return len(take(n + 1, filter(predicate, iterable))) == n - - -def circular_shifts(iterable): - """Return a list of circular shifts of *iterable*. - - >>> circular_shifts(range(4)) - [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] - """ - lst = list(iterable) - return take(len(lst), windowed(cycle(lst), len(lst))) - - -def make_decorator(wrapping_func, result_index=0): - """Return a decorator version of *wrapping_func*, which is a function that - modifies an iterable. *result_index* is the position in that function's - signature where the iterable goes. - - This lets you use itertools on the "production end," i.e. at function - definition. This can augment what the function returns without changing the - function's code. - - For example, to produce a decorator version of :func:`chunked`: - - >>> from more_itertools import chunked - >>> chunker = make_decorator(chunked, result_index=0) - >>> @chunker(3) - ... 
def iter_range(n): - ... return iter(range(n)) - ... - >>> list(iter_range(9)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - - To only allow truthy items to be returned: - - >>> truth_serum = make_decorator(filter, result_index=1) - >>> @truth_serum(bool) - ... def boolean_test(): - ... return [0, 1, '', ' ', False, True] - ... - >>> list(boolean_test()) - [1, ' ', True] - - The :func:`peekable` and :func:`seekable` wrappers make for practical - decorators: - - >>> from more_itertools import peekable - >>> peekable_function = make_decorator(peekable) - >>> @peekable_function() - ... def str_range(*args): - ... return (str(x) for x in range(*args)) - ... - >>> it = str_range(1, 20, 2) - >>> next(it), next(it), next(it) - ('1', '3', '5') - >>> it.peek() - '7' - >>> next(it) - '7' - - """ - # See https://sites.google.com/site/bbayles/index/decorator_factory for - # notes on how this works. - def decorator(*wrapping_args, **wrapping_kwargs): - def outer_wrapper(f): - def inner_wrapper(*args, **kwargs): - result = f(*args, **kwargs) - wrapping_args_ = list(wrapping_args) - wrapping_args_.insert(result_index, result) - return wrapping_func(*wrapping_args_, **wrapping_kwargs) - - return inner_wrapper - - return outer_wrapper - - return decorator - - -def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): - """Return a dictionary that maps the items in *iterable* to categories - defined by *keyfunc*, transforms them with *valuefunc*, and - then summarizes them by category with *reducefunc*. - - *valuefunc* defaults to the identity function if it is unspecified. - If *reducefunc* is unspecified, no summarization takes place: - - >>> keyfunc = lambda x: x.upper() - >>> result = map_reduce('abbccc', keyfunc) - >>> sorted(result.items()) - [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] - - Specifying *valuefunc* transforms the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> result = map_reduce('abbccc', keyfunc, valuefunc) - >>> sorted(result.items()) - [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] - - Specifying *reducefunc* summarizes the categorized items: - - >>> keyfunc = lambda x: x.upper() - >>> valuefunc = lambda x: 1 - >>> reducefunc = sum - >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) - >>> sorted(result.items()) - [('A', 1), ('B', 2), ('C', 3)] - - You may want to filter the input iterable before applying the map/reduce - procedure: - - >>> all_items = range(30) - >>> items = [x for x in all_items if 10 <= x <= 20] # Filter - >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 - >>> categories = map_reduce(items, keyfunc=keyfunc) - >>> sorted(categories.items()) - [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] - >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) - >>> sorted(summaries.items()) - [(0, 90), (1, 75)] - - Note that all items in the iterable are gathered into a list before the - summarization step, which may require significant storage. - - The returned object is a :obj:`collections.defaultdict` with the - ``default_factory`` set to ``None``, such that it behaves like a normal - dictionary. 
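    One more sketch in the same vein, building a histogram of word lengths
    with ``len`` doing double duty as key and reducer (the sample words are
    hypothetical):

    >>> words = ['apple', 'fig', 'pear', 'plum', 'kiwi']
    >>> result = map_reduce(words, keyfunc=len, reducefunc=len)
    >>> sorted(result.items())
    [(3, 1), (4, 3), (5, 1)]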
- - """ - valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc - - ret = defaultdict(list) - for item in iterable: - key = keyfunc(item) - value = valuefunc(item) - ret[key].append(value) - - if reducefunc is not None: - for key, value_list in ret.items(): - ret[key] = reducefunc(value_list) - - ret.default_factory = None - return ret - - -def rlocate(iterable, pred=bool, window_size=None): - """Yield the index of each item in *iterable* for which *pred* returns - ``True``, starting from the right and moving left. - - *pred* defaults to :func:`bool`, which will select truthy items: - - >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4 - [4, 2, 1] - - Set *pred* to a custom function to, e.g., find the indexes for a particular - item: - - >>> iterable = iter('abcb') - >>> pred = lambda x: x == 'b' - >>> list(rlocate(iterable, pred)) - [3, 1] - - If *window_size* is given, then the *pred* function will be called with - that many items. This enables searching for sub-sequences: - - >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3] - >>> pred = lambda *args: args == (1, 2, 3) - >>> list(rlocate(iterable, pred=pred, window_size=3)) - [9, 5, 1] - - Beware, this function won't return anything for infinite iterables. - If *iterable* is reversible, ``rlocate`` will reverse it and search from - the right. Otherwise, it will search from the left and return the results - in reverse order. - - See :func:`locate` to for other example applications. - - """ - if window_size is None: - try: - len_iter = len(iterable) - return (len_iter - i - 1 for i in locate(reversed(iterable), pred)) - except TypeError: - pass - - return reversed(list(locate(iterable, pred, window_size))) - - -def replace(iterable, pred, substitutes, count=None, window_size=1): - """Yield the items from *iterable*, replacing the items for which *pred* - returns ``True`` with the items from the iterable *substitutes*. - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1] - >>> pred = lambda x: x == 0 - >>> substitutes = (2, 3) - >>> list(replace(iterable, pred, substitutes)) - [1, 1, 2, 3, 1, 1, 2, 3, 1, 1] - - If *count* is given, the number of replacements will be limited: - - >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0] - >>> pred = lambda x: x == 0 - >>> substitutes = [None] - >>> list(replace(iterable, pred, substitutes, count=2)) - [1, 1, None, 1, 1, None, 1, 1, 0] - - Use *window_size* to control the number of items passed as arguments to - *pred*. This allows for locating and replacing subsequences. - - >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5] - >>> window_size = 3 - >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred - >>> substitutes = [3, 4] # Splice in these items - >>> list(replace(iterable, pred, substitutes, window_size=window_size)) - [3, 4, 5, 3, 4, 5] - - """ - if window_size < 1: - raise ValueError('window_size must be at least 1') - - # Save the substitutes iterable, since it's used more than once - substitutes = tuple(substitutes) - - # Add padding such that the number of windows matches the length of the - # iterable - it = chain(iterable, [_marker] * (window_size - 1)) - windows = windowed(it, window_size) - - n = 0 - for w in windows: - # If the current window matches our predicate (and we haven't hit - # our maximum number of replacements), splice in the substitutes - # and then consume the following windows that overlap with this one. - # For example, if the iterable is (0, 1, 2, 3, 4...) - # and the window size is 2, we have (0, 1), (1, 2), (2, 3)... 
- # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2) - if pred(*w): - if (count is None) or (n < count): - n += 1 - yield from substitutes - consume(windows, window_size - 1) - continue - - # If there was no match (or we've reached the replacement limit), - # yield the first item from the window. - if w and (w[0] is not _marker): - yield w[0] - - -def partitions(iterable): - """Yield all possible order-preserving partitions of *iterable*. - - >>> iterable = 'abc' - >>> for part in partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['a', 'b', 'c'] - - This is unrelated to :func:`partition`. - - """ - sequence = list(iterable) - n = len(sequence) - for i in powerset(range(1, n)): - yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))] - - -def set_partitions(iterable, k=None): - """ - Yield the set partitions of *iterable* into *k* parts. Set partitions are - not order-preserving. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable, 2): - ... print([''.join(p) for p in part]) - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - - - If *k* is not given, every set partition is generated. - - >>> iterable = 'abc' - >>> for part in set_partitions(iterable): - ... print([''.join(p) for p in part]) - ['abc'] - ['a', 'bc'] - ['ab', 'c'] - ['b', 'ac'] - ['a', 'b', 'c'] - - """ - L = list(iterable) - n = len(L) - if k is not None: - if k < 1: - raise ValueError( - "Can't partition in a negative or zero number of groups" - ) - elif k > n: - return - - def set_partitions_helper(L, k): - n = len(L) - if k == 1: - yield [L] - elif n == k: - yield [[s] for s in L] - else: - e, *M = L - for p in set_partitions_helper(M, k - 1): - yield [[e], *p] - for p in set_partitions_helper(M, k): - for i in range(len(p)): - yield p[:i] + [[e] + p[i]] + p[i + 1 :] - - if k is None: - for k in range(1, n + 1): - yield from set_partitions_helper(L, k) - else: - yield from set_partitions_helper(L, k) - - -class time_limited: - """ - Yield items from *iterable* until *limit_seconds* have passed. - If the time limit expires before all items have been yielded, the - ``timed_out`` parameter will be set to ``True``. - - >>> from time import sleep - >>> def generator(): - ... yield 1 - ... yield 2 - ... sleep(0.2) - ... yield 3 - >>> iterable = time_limited(0.1, generator()) - >>> list(iterable) - [1, 2] - >>> iterable.timed_out - True - - Note that the time is checked before each item is yielded, and iteration - stops if the time elapsed is greater than *limit_seconds*. If your time - limit is 1 second, but it takes 2 seconds to generate the first item from - the iterable, the function will run for 2 seconds and not yield anything. - - """ - - def __init__(self, limit_seconds, iterable): - if limit_seconds < 0: - raise ValueError('limit_seconds must be positive') - self.limit_seconds = limit_seconds - self._iterable = iter(iterable) - self._start_time = monotonic() - self.timed_out = False - - def __iter__(self): - return self - - def __next__(self): - item = next(self._iterable) - if monotonic() - self._start_time > self.limit_seconds: - self.timed_out = True - raise StopIteration - - return item - - -def only(iterable, default=None, too_long=None): - """If *iterable* has only one item, return it. - If it has zero items, return *default*. - If it has more than one item, raise the exception given by *too_long*, - which is ``ValueError`` by default. 
- - >>> only([], default='missing') - 'missing' - >>> only([1]) - 1 - >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - ValueError: Expected exactly one item in iterable, but got 1, 2, - and perhaps more.' - >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - ... - TypeError - - Note that :func:`only` attempts to advance *iterable* twice to ensure there - is only one item. See :func:`spy` or :func:`peekable` to check - iterable contents less destructively. - """ - it = iter(iterable) - first_value = next(it, default) - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value - - -def ichunked(iterable, n): - """Break *iterable* into sub-iterables with *n* elements each. - :func:`ichunked` is like :func:`chunked`, but it yields iterables - instead of lists. - - If the sub-iterables are read in order, the elements of *iterable* - won't be stored in memory. - If they are read out of order, :func:`itertools.tee` is used to cache - elements as necessary. - - >>> from itertools import count - >>> all_chunks = ichunked(count(), 4) - >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks) - >>> list(c_2) # c_1's elements have been cached; c_3's haven't been - [4, 5, 6, 7] - >>> list(c_1) - [0, 1, 2, 3] - >>> list(c_3) - [8, 9, 10, 11] - - """ - source = iter(iterable) - - while True: - # Check to see whether we're at the end of the source iterable - item = next(source, _marker) - if item is _marker: - return - - # Clone the source and yield an n-length slice - source, it = tee(chain([item], source)) - yield islice(it, n) - - # Advance the source iterable - consume(source, n) - - -def distinct_combinations(iterable, r): - """Yield the distinct combinations of *r* items taken from *iterable*. - - >>> list(distinct_combinations([0, 0, 1], 2)) - [(0, 0), (0, 1)] - - Equivalent to ``set(combinations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. - - """ - if r < 0: - raise ValueError('r must be non-negative') - elif r == 0: - yield () - return - pool = tuple(iterable) - generators = [unique_everseen(enumerate(pool), key=itemgetter(1))] - current_combo = [None] * r - level = 0 - while generators: - try: - cur_idx, p = next(generators[-1]) - except StopIteration: - generators.pop() - level -= 1 - continue - current_combo[level] = p - if level + 1 == r: - yield tuple(current_combo) - else: - generators.append( - unique_everseen( - enumerate(pool[cur_idx + 1 :], cur_idx + 1), - key=itemgetter(1), - ) - ) - level += 1 - - -def filter_except(validator, iterable, *exceptions): - """Yield the items from *iterable* for which the *validator* function does - not raise one of the specified *exceptions*. - - *validator* is called for each item in *iterable*. - It should be a function that accepts one argument and raises an exception - if that item is not valid. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(filter_except(int, iterable, ValueError, TypeError)) - ['1', '2', '4'] - - If an exception other than one given by *exceptions* is raised by - *validator*, it is raised like normal. 
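    A sketch of the same pattern used for data cleaning, with :func:`int`
    as the validator (the values are hypothetical):

    >>> mixed = ['10', 'x', '20']
    >>> list(filter_except(int, mixed, ValueError))
    ['10', '20']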
- """ - for item in iterable: - try: - validator(item) - except exceptions: - pass - else: - yield item - - - def map_except(function, iterable, *exceptions): - """Transform each item from *iterable* with *function* and yield the - result, unless *function* raises one of the specified *exceptions*. - - *function* is called to transform each item in *iterable*. - It should accept one argument. - - >>> iterable = ['1', '2', 'three', '4', None] - >>> list(map_except(int, iterable, ValueError, TypeError)) - [1, 2, 4] - - If an exception other than one given by *exceptions* is raised by - *function*, it is raised like normal. - """ - for item in iterable: - try: - yield function(item) - except exceptions: - pass - - - def _sample_unweighted(iterable, k): - # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li: - # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". - - # Fill up the reservoir (collection of samples) with the first `k` samples - reservoir = take(k, iterable) - - # Generate random number that's the largest in a sample of k U(0,1) numbers - # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic - W = exp(log(random()) / k) - - # The number of elements to skip before changing the reservoir is a random - # number with a geometric distribution. Sample it using random() and logs. - next_index = k + floor(log(random()) / log(1 - W)) - - for index, element in enumerate(iterable, k): - - if index == next_index: - reservoir[randrange(k)] = element - # The new W is the largest in a sample of k U(0, `old_W`) numbers - W *= exp(log(random()) / k) - next_index += floor(log(random()) / log(1 - W)) + 1 - - return reservoir - - - def _sample_weighted(iterable, k, weights): - # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al.: - # "Weighted random sampling with a reservoir". - - # Log-transform for numerical stability for weights that are small/large - weight_keys = (log(random()) / weight for weight in weights) - - # Fill up the reservoir (collection of samples) with the first `k` - # weight-keys and elements, then heapify the list. - reservoir = take(k, zip(weight_keys, iterable)) - heapify(reservoir) - - # The number of jumps before changing the reservoir is a random variable - # with an exponential distribution. Sample it using random() and logs. - smallest_weight_key, _ = reservoir[0] - weights_to_skip = log(random()) / smallest_weight_key - - for weight, element in zip(weights, iterable): - if weight >= weights_to_skip: - # The notation here is consistent with the paper, but we store - # the weight-keys in log-space for better numerical stability. - smallest_weight_key, _ = reservoir[0] - t_w = exp(weight * smallest_weight_key) - r_2 = uniform(t_w, 1) # generate U(t_w, 1) - weight_key = log(r_2) / weight - heapreplace(reservoir, (weight_key, element)) - smallest_weight_key, _ = reservoir[0] - weights_to_skip = log(random()) / smallest_weight_key - else: - weights_to_skip -= weight - - # Equivalent to [element for weight_key, element in sorted(reservoir)] - return [heappop(reservoir)[1] for _ in range(k)] - - - def sample(iterable, k, weights=None): - """Return a *k*-length list of elements chosen (without replacement) - from the *iterable*. Like :func:`random.sample`, but works on iterables - of unknown length.
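- - Internally, when *weights* is ``None`` the unweighted reservoir algorithm - above ("Algorithm L") is used; otherwise the weighted "A-ExpJ" reservoir - is used. In both cases only *k* elements are kept in memory at once.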
- - >>> iterable = range(100) - >>> sample(iterable, 5) # doctest: +SKIP - [81, 60, 96, 16, 4] - - An iterable with *weights* may also be given: - - >>> iterable = range(100) - >>> weights = (i * i + 1 for i in range(100)) - >>> sample(iterable, 5, weights=weights) # doctest: +SKIP - [79, 67, 74, 66, 78] - - The algorithm can also be used to generate weighted random permutations. - The relative weight of each item determines the probability that it - appears late in the permutation. - - >>> data = "abcdefgh" - >>> weights = range(1, len(data) + 1) - >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP - ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f'] - """ - if k == 0: - return [] - - iterable = iter(iterable) - if weights is None: - return _sample_unweighted(iterable, k) - else: - weights = iter(weights) - return _sample_weighted(iterable, k, weights) - - - def is_sorted(iterable, key=None, reverse=False): - """Returns ``True`` if the items of *iterable* are in sorted order, and - ``False`` otherwise. *key* and *reverse* have the same meaning that they do - in the built-in :func:`sorted` function. - - >>> is_sorted(['1', '2', '3', '4', '5'], key=int) - True - >>> is_sorted([5, 4, 3, 1, 2], reverse=True) - False - - The function returns ``False`` after encountering the first out-of-order - item. If there are no out-of-order items, the iterable is exhausted. - """ - - compare = lt if reverse else gt - it = iterable if (key is None) else map(key, iterable) - return not any(starmap(compare, pairwise(it))) - - - class AbortThread(BaseException): - pass - - - class callback_iter: - """Convert a function that uses callbacks to an iterator. - - Let *func* be a function that takes a `callback` keyword argument. - For example: - - >>> def func(callback=None): - ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]: - ... if callback: - ... callback(i, c) - ... return 4 - - - Use ``with callback_iter(func)`` to get an iterator over the parameters - that are delivered to the callback. - - >>> with callback_iter(func) as it: - ... for args, kwargs in it: - ... print(args) - (1, 'a') - (2, 'b') - (3, 'c') - - The function will be called in a background thread. The ``done`` property - indicates whether it has completed execution. - - >>> it.done - True - - If it completes successfully, its return value will be available - in the ``result`` property. - - >>> it.result - 4 - - Notes: - - * If the function uses some keyword argument besides ``callback``, supply - *callback_kwd*. - * If it finished executing, but raised an exception, accessing the - ``result`` property will raise the same exception. - * If it hasn't finished executing, accessing the ``result`` - property from within the ``with`` block will raise ``RuntimeError``. - * If it hasn't finished executing, accessing the ``result`` property from - outside the ``with`` block will raise a - ``more_itertools.AbortThread`` exception. - * Provide *wait_seconds* to adjust how frequently it is polled for - output.
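- * The wrapped function is not started until the first item is requested: - the internal reader is a generator, so merely constructing a - ``callback_iter`` does not invoke *func*.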
- - """ - - def __init__(self, func, callback_kwd='callback', wait_seconds=0.1): - self._func = func - self._callback_kwd = callback_kwd - self._aborted = False - self._future = None - self._wait_seconds = wait_seconds - self._executor = ThreadPoolExecutor(max_workers=1) - self._iterator = self._reader() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self._aborted = True - self._executor.shutdown() - - def __iter__(self): - return self - - def __next__(self): - return next(self._iterator) - - @property - def done(self): - if self._future is None: - return False - return self._future.done() - - @property - def result(self): - if not self.done: - raise RuntimeError('Function has not yet completed') - - return self._future.result() - - def _reader(self): - q = Queue() - - def callback(*args, **kwargs): - if self._aborted: - raise AbortThread('canceled by user') - - q.put((args, kwargs)) - - self._future = self._executor.submit( - self._func, **{self._callback_kwd: callback} - ) - - while True: - try: - item = q.get(timeout=self._wait_seconds) - except Empty: - pass - else: - q.task_done() - yield item - - if self._future.done(): - break - - remaining = [] - while True: - try: - item = q.get_nowait() - except Empty: - break - else: - q.task_done() - remaining.append(item) - q.join() - yield from remaining - - - def windowed_complete(iterable, n): - """ - Yield ``(beginning, middle, end)`` tuples, where: - - * Each ``middle`` has *n* items from *iterable* - * Each ``beginning`` has the items before the ones in ``middle`` - * Each ``end`` has the items after the ones in ``middle`` - - >>> iterable = range(7) - >>> n = 3 - >>> for beginning, middle, end in windowed_complete(iterable, n): - ... print(beginning, middle, end) - () (0, 1, 2) (3, 4, 5, 6) - (0,) (1, 2, 3) (4, 5, 6) - (0, 1) (2, 3, 4) (5, 6) - (0, 1, 2) (3, 4, 5) (6,) - (0, 1, 2, 3) (4, 5, 6) () - - Note that *n* must be at least 0 and at most equal to the length of - *iterable*. - - This function will exhaust the iterable and may require significant - storage. - """ - if n < 0: - raise ValueError('n must be >= 0') - - seq = tuple(iterable) - size = len(seq) - - if n > size: - raise ValueError('n must be <= len(seq)') - - for i in range(size - n + 1): - beginning = seq[:i] - middle = seq[i : i + n] - end = seq[i + n :] - yield beginning, middle, end - - - def all_unique(iterable, key=None): - """ - Returns ``True`` if all the elements of *iterable* are unique (no two - elements are equal). - - >>> all_unique('ABCB') - False - - If a *key* function is specified, it will be used to make comparisons. - - >>> all_unique('ABCb') - True - >>> all_unique('ABCb', str.lower) - False - - The function returns as soon as the first non-unique element is - encountered. Iterables with a mix of hashable and unhashable items can - be used, but the function will be slower for unhashable items. - """ - seenset = set() - seenset_add = seenset.add - seenlist = [] - seenlist_add = seenlist.append - for element in map(key, iterable) if key else iterable: - try: - if element in seenset: - return False - seenset_add(element) - except TypeError: - if element in seenlist: - return False - seenlist_add(element) - return True - - - def nth_product(index, *args): - """Equivalent to ``list(product(*args))[index]``. - - The products of *args* can be ordered lexicographically. - :func:`nth_product` computes the product at sort position *index* without - computing the previous products.
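- - The index is decoded like a mixed-radix number, with the last pool - varying fastest. For example, with four two-element pools, index 8 is - binary ``1000``: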
- - >>> nth_product(8, range(2), range(2), range(2), range(2)) - (1, 0, 0, 0) - - ``IndexError`` will be raised if the given *index* is invalid. - """ - pools = list(map(tuple, reversed(args))) - ns = list(map(len, pools)) - - c = reduce(mul, ns) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - result = [] - for pool, n in zip(pools, ns): - result.append(pool[index % n]) - index //= n - - return tuple(reversed(result)) - - - def nth_permutation(iterable, r, index): - """Equivalent to ``list(permutations(iterable, r))[index]`` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`nth_permutation` - computes the subsequence at sort position *index* directly, without - computing the previous subsequences. - - >>> nth_permutation('ghijk', 2, 5) - ('h', 'i') - - ``ValueError`` will be raised if *r* is negative or greater than the length - of *iterable*. - ``IndexError`` will be raised if the given *index* is invalid. - """ - pool = list(iterable) - n = len(pool) - - if r is None or r == n: - r, c = n, factorial(n) - elif not 0 <= r < n: - raise ValueError - else: - c = factorial(n) // factorial(n - r) - - if index < 0: - index += c - - if not 0 <= index < c: - raise IndexError - - if c == 0: - return tuple() - - result = [0] * r - q = index * factorial(n) // c if r < n else index - for d in range(1, n + 1): - q, i = divmod(q, d) - if 0 <= n - d < r: - result[n - d] = i - if q == 0: - break - - return tuple(map(pool.pop, result)) - - - def value_chain(*args): - """Yield all arguments passed to the function in the same order in which - they were passed. If an argument itself is iterable then iterate over its - values. - - >>> list(value_chain(1, 2, 3, [4, 5, 6])) - [1, 2, 3, 4, 5, 6] - - Binary and text strings are not considered iterable and are emitted - as-is: - - >>> list(value_chain('12', '34', ['56', '78'])) - ['12', '34', '56', '78'] - - - Multiple levels of nesting are not flattened. - - """ - for value in args: - if isinstance(value, (str, bytes)): - yield value - continue - try: - yield from value - except TypeError: - yield value - - - def product_index(element, *args): - """Equivalent to ``list(product(*args)).index(element)`` - - The products of *args* can be ordered lexicographically. - :func:`product_index` computes the first index of *element* without - computing the previous products. - - >>> product_index([8, 2], range(10), range(5)) - 42 - - ``ValueError`` will be raised if the given *element* isn't in the product - of *args*. - """ - index = 0 - - for x, pool in zip_longest(element, args, fillvalue=_marker): - if x is _marker or pool is _marker: - raise ValueError('element is not a product of args') - - pool = tuple(pool) - index = index * len(pool) + pool.index(x) - - return index - - - def combination_index(element, iterable): - """Equivalent to ``list(combinations(iterable, r)).index(element)`` - - The subsequences of *iterable* that are of length *r* can be ordered - lexicographically. :func:`combination_index` computes the index of the - first *element*, without computing the previous combinations. - - >>> combination_index('adf', 'abcdefg') - 10 - - ``ValueError`` will be raised if the given *element* isn't one of the - combinations of *iterable*.
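- - Internally, the positions of the matched items are combined with - binomial coefficients (the combinatorial number system), so the earlier - combinations are never enumerated.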
- """ - element = enumerate(element) - k, y = next(element, (None, None)) - if k is None: - return 0 - - indexes = [] - pool = enumerate(iterable) - for n, x in pool: - if x == y: - indexes.append(n) - tmp, y = next(element, (None, None)) - if tmp is None: - break - else: - k = tmp - else: - raise ValueError('element is not a combination of iterable') - - n, _ = last(pool, default=(n, None)) - - # Python versions below 3.8 don't have math.comb - index = 1 - for i, j in enumerate(reversed(indexes), start=1): - j = n - j - if i <= j: - index += factorial(j) // (factorial(i) * factorial(j - i)) - - return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index - - - def permutation_index(element, iterable): - """Equivalent to ``list(permutations(iterable, r)).index(element)`` - - The subsequences of *iterable* that are of length *r* where order is - important can be ordered lexicographically. :func:`permutation_index` - computes the index of the first *element* directly, without computing - the previous permutations. - - >>> permutation_index([1, 3, 2], range(5)) - 19 - - ``ValueError`` will be raised if the given *element* isn't one of the - permutations of *iterable*. - """ - index = 0 - pool = list(iterable) - for i, x in zip(range(len(pool), -1, -1), element): - r = pool.index(x) - index = index * i + r - del pool[r] - - return index - - - class countable: - """Wrap *iterable* and keep a count of how many items have been consumed. - - The ``items_seen`` attribute starts at ``0`` and increments as the iterable - is consumed: - - >>> iterable = map(str, range(10)) - >>> it = countable(iterable) - >>> it.items_seen - 0 - >>> next(it), next(it) - ('0', '1') - >>> list(it) - ['2', '3', '4', '5', '6', '7', '8', '9'] - >>> it.items_seen - 10 - """ - - def __init__(self, iterable): - self._it = iter(iterable) - self.items_seen = 0 - - def __iter__(self): - return self - - def __next__(self): - item = next(self._it) - self.items_seen += 1 - - return item diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/installer.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/installer.py deleted file mode 100644 index 57e2b587aae05167540abdd2b53c7b5bcac298f0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/installer.py +++ /dev/null @@ -1,97 +0,0 @@ -import glob -import os -import subprocess -import sys -import tempfile -from distutils import log -from distutils.errors import DistutilsError - -import pkg_resources -from setuptools.wheel import Wheel - - -def _fixup_find_links(find_links): - """Ensure the find-links option ends up being a list of strings.""" - if isinstance(find_links, str): - return find_links.split() - assert isinstance(find_links, (tuple, list)) - return find_links - - -def fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME - """Fetch an egg needed for building. - - Use pip/wheel to fetch/build a wheel.""" - # Warn if wheel is not available - try: - pkg_resources.get_distribution('wheel') - except pkg_resources.DistributionNotFound: - dist.announce('WARNING: The wheel package is not available.', log.WARN) - # Ignore environment markers; if supplied, it is required. - req = strip_marker(req) - # Take easy_install options into account, but do not override relevant - # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll - # take precedence.
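- # The options gathered below drive a pip invocation of roughly this - # shape (an illustrative sketch; the exact arguments depend on the - # configuration found at runtime): - # python -m pip --disable-pip-version-check wheel --no-deps \ - # -w TMPDIR [--quiet] [--index-url URL] [--find-links LINK] REQUIREMENT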
- opts = dist.get_option_dict('easy_install') - if 'allow_hosts' in opts: - raise DistutilsError('the `allow-hosts` option is not supported ' - 'when using pip to install requirements.') - quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ - if 'PIP_INDEX_URL' in os.environ: - index_url = None - elif 'index_url' in opts: - index_url = opts['index_url'][1] - else: - index_url = None - find_links = ( - _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts - else [] - ) - if dist.dependency_links: - find_links.extend(dist.dependency_links) - eggs_dir = os.path.realpath(dist.get_egg_cache_dir()) - environment = pkg_resources.Environment() - for egg_dist in pkg_resources.find_distributions(eggs_dir): - if egg_dist in req and environment.can_add(egg_dist): - return egg_dist - with tempfile.TemporaryDirectory() as tmpdir: - cmd = [ - sys.executable, '-m', 'pip', - '--disable-pip-version-check', - 'wheel', '--no-deps', - '-w', tmpdir, - ] - if quiet: - cmd.append('--quiet') - if index_url is not None: - cmd.extend(('--index-url', index_url)) - for link in find_links or []: - cmd.extend(('--find-links', link)) - # If requirement is a PEP 508 direct URL, directly pass - # the URL to pip, as `req @ url` does not work on the - # command line. - cmd.append(req.url or str(req)) - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: - raise DistutilsError(str(e)) from e - wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0]) - dist_location = os.path.join(eggs_dir, wheel.egg_name()) - wheel.install_as_egg(dist_location) - dist_metadata = pkg_resources.PathMetadata( - dist_location, os.path.join(dist_location, 'EGG-INFO')) - dist = pkg_resources.Distribution.from_filename( - dist_location, metadata=dist_metadata) - return dist - - -def strip_marker(req): - """ - Return a new requirement without the environment marker to avoid - calling pip with something like `babel; extra == "i18n"`, which - would always be ignored. 
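- - For example, parsing ``babel; extra == "i18n"`` and clearing its marker - yields a requirement that pip sees simply as ``babel``.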
- """ - # create a copy to avoid mutating the input - req = pkg_resources.Requirement.parse(str(req)) - req.marker = None - return req diff --git a/spaces/qi3/White-box-Cartoonization/README.md b/spaces/qi3/White-box-Cartoonization/README.md deleted file mode 100644 index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000 --- a/spaces/qi3/White-box-Cartoonization/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -python_version: 3.7 -title: White Box Cartoonization -emoji: 📚 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hylee/White-box-Cartoonization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/qinzhu/diy-girlfriend-online/commons.py b/spaces/qinzhu/diy-girlfriend-online/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/diy-girlfriend-online/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/quidiaMuxgu/Expedit-SAM/CleanMyPC 1.10.5.2041 Crack !!TOP!! Full.md b/spaces/quidiaMuxgu/Expedit-SAM/CleanMyPC 1.10.5.2041 Crack !!TOP!! Full.md deleted file mode 100644 index 3594d08ea7e1ccac2d2ad3f47787a3a2821eb2b2..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/CleanMyPC 1.10.5.2041 Crack !!TOP!! Full.md +++ /dev/null @@ -1,30 +0,0 @@ - -

                CleanMyPC 1.10.5.2041 Crack Full: A Powerful and Reliable PC Cleaner

                -

                If you are looking for a way to keep your Windows PC clean and fast, you might want to check out CleanMyPC 1.10.5.2041 Crack Full. This is a new cleaning and maintenance utility from MacPaw Inc., the developer of the popular CleanMyMac software. CleanMyPC 1.10.5.2041 Crack Full includes a suite of cleaning tools that can remove gigabytes of junk files, fix registry errors, uninstall unwanted programs, manage startup items, and more.

                -

                CleanMyPC 1.10.5.2041 Crack Full


DOWNLOAD https://geags.com/2uCszJ



                -

                In this article, we will review some of the main features of CleanMyPC 1.10.5.2041 Crack Full and show you how to download and install it on your PC.

                -

                Features of CleanMyPC 1.10.5.2041 Crack Full

                -
                  -
                • Caches & Logs: This tool scans your PC for temporary files, logs, cache files, and other data that can take up valuable disk space and slow down your system. You can review and delete these files with a few clicks.
                • -
• Help Files: This tool removes help files that are associated with applications that you no longer use or need. These files can also occupy a lot of space and clutter your PC.
                • -
                • Extra Languages: This tool deletes language files that are not used by your system or applications. These files can free up more space and improve your PC performance.
                • -
                • Additional Utilities: This tool offers a set of built-in utilities that can help you keep your PC reliable, fast, clean, and secure. These utilities include Secure Erase, Complete Uninstall, Registry Maintenance, Autorun, and Gadgets & Extensions Manager.
                • -
                • Secure Erase: This tool allows you to permanently delete files and folders from your PC, so that they cannot be recovered by any data recovery software. This is useful when you want to get rid of sensitive or confidential data.
                • -
                • Complete Uninstall: This tool helps you uninstall programs from your PC completely, without leaving any traces or leftovers behind. This can prevent registry errors, conflicts, and performance issues.
                • -
                • Registry Maintenance: This tool scans your PC for registry errors and fixes them automatically. The registry is a database that stores settings and options for your system and applications. A clean and error-free registry can improve your PC stability and speed.
                • -
                • Autorun: This tool lets you manage the programs that run automatically when you start your PC. You can disable or enable any program from the list, or add new ones. This can help you optimize your PC startup time and performance.
                • -
                • Gadgets & Extensions Manager: This tool helps you manage the gadgets and extensions that are installed on your PC. You can remove any gadget or extension that you don't use or need anymore, or disable them temporarily.
                • -
                -

                How to Download and Install CleanMyPC 1.10.5.2041 Crack Full

                -

                If you want to try out CleanMyPC 1.10.5.2041 Crack Full on your PC, you can follow these simple steps:

                -
                  -
1. Download the CleanMyPC 1.10.5.2041 Crack Full file from the link below.
                2. -
                3. Extract the file using WinRAR or any other extraction software.
                4. -
                5. Run the setup file and follow the instructions to install CleanMyPC 1.10.5.2041 Crack Full on your PC.
                6. -
                7. Copy the patch file from the crack folder and paste it into the installation directory of CleanMyPC 1.10.5.2041 Crack Full.
                8. -
                9. Run the patch file as administrator and click on the patch button.
                10. -
                11. Enjoy CleanMyPC 1.10.5.2041 Crack Full on your PC!
                12. -
                -

CleanMyPC 1.10.5.2041 Crack Full is a powerful and reliable PC cleaner that can help you keep your Windows PC in good shape.

                d5da3c52bf
                -
                -
                \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Drdepth 4 0 10 Keygen Tsrh LINK.md b/spaces/quidiaMuxgu/Expedit-SAM/Drdepth 4 0 10 Keygen Tsrh LINK.md deleted file mode 100644 index 6a5e7705ff07a0fcab84462703764236a9e2de7c..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Drdepth 4 0 10 Keygen Tsrh LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -

                Drdepth 4 0 10 Keygen Tsrh


                Download Zip →→→ https://geags.com/2uCsLN



                -
                -Drdepth 4 0 10 Keygen Tsrh · digital art cyber controller 1.3 downloadbfdcm · game Men of war assault squad v2.05.15 trainer Limited 17 4d29de3e1b
                -
                -
                -

diff --git a/spaces/rachana219/MODT2/LICENSE.md b/spaces/rachana219/MODT2/LICENSE.md deleted file mode 100644 index f288702d2fa16d3cdf0035b15a9fcbc552cd88e7..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/LICENSE.md +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. 
-States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/lib/mesh_util.py b/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/lib/mesh_util.py
deleted file mode 100644
index 39934219011401e194c61cc00034b12dad4072d3..0000000000000000000000000000000000000000
--- a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/lib/mesh_util.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from skimage import measure
-import numpy as np
-import torch
-from .sdf import create_grid, eval_grid_octree, eval_grid
-
-
-def reconstruction(net, cuda, calib_tensor,
-                   resolution, b_min, b_max,
-                   use_octree=False, num_samples=10000, transform=None):
-    '''
-    Reconstruct meshes from the sdf predicted by the network.
-    :param net: a BasePixImpNet object. Call the image filter beforehand.
-    :param cuda: cuda device
-    :param calib_tensor: calibration tensor
-    :param resolution: resolution of the grid cell
-    :param b_min: bounding box corner [x_min, y_min, z_min]
-    :param b_max: bounding box corner [x_max, y_max, z_max]
-    :param use_octree: whether to use octree acceleration
-    :param num_samples: how many points to query per GPU iteration
-    :return: marching cubes results.
-    '''
-    # First we create the grid at the given resolution,
-    # plus the transformation matrix from grid coordinates to real-world xyz
-    coords, mat = create_grid(resolution, resolution, resolution,
-                              b_min, b_max, transform=transform)
-
-    # Then we define the function used to evaluate each cell
-    def eval_func(points):
-        points = np.expand_dims(points, axis=0)
-        points = np.repeat(points, net.num_views, axis=0)
-        samples = torch.from_numpy(points).to(device=cuda).float()
-        net.query(samples, calib_tensor)
-        pred = net.get_preds()[0][0]
-        return pred.detach().cpu().numpy()
-
-    # Then we evaluate the grid
-    if use_octree:
-        sdf = eval_grid_octree(coords, eval_func, num_samples=num_samples)
-    else:
-        sdf = eval_grid(coords, eval_func, num_samples=num_samples)
-
-    # Finally we run marching cubes
-    try:
-        # NOTE: marching_cubes_lewiner was removed in newer scikit-image
-        # releases; use measure.marching_cubes(sdf, 0.5) there.
-        verts, faces, normals, values = measure.marching_cubes_lewiner(sdf, 0.5)
-        # transform verts into the world coordinate system
-        verts = np.matmul(mat[:3, :3], verts.T) + mat[:3, 3:4]
-        verts = verts.T
-        return verts, faces, normals, values
-    except Exception as e:
-        print('error: marching cubes failed:', e)
-        return -1
-
-
-def save_obj_mesh(mesh_path, verts, faces):
-    file = open(mesh_path, 'w')
-
-    for v in verts:
-        file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
-    for f in faces:
-        f_plus = f + 1
-        file.write('f %d %d %d\n' % (f_plus[0], f_plus[2], f_plus[1]))
-    file.close()
-
-
-def save_obj_mesh_with_color(mesh_path, verts, faces, colors):
-    file = open(mesh_path, 'w')
-
-    for idx, v in enumerate(verts):
-        c = colors[idx]
-        file.write('v %.4f %.4f %.4f %.4f %.4f %.4f\n' % (v[0], v[1], v[2], c[0], c[1], c[2]))
-    for f in faces:
-        f_plus = f + 1
-        file.write('f %d %d %d\n' % (f_plus[0], f_plus[2], f_plus[1]))
-    file.close()
-
-
-def save_obj_mesh_with_uv(mesh_path, verts, faces, uvs):
-    file = open(mesh_path, 'w')
-
-    for idx, v in enumerate(verts):
-        vt = uvs[idx]
-        file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2]))
-        file.write('vt %.4f %.4f\n' % (vt[0], vt[1]))
-
-    for f in faces:
-        f_plus = f + 1
-        file.write('f %d/%d %d/%d %d/%d\n' % (f_plus[0], f_plus[0],
-                                              f_plus[2], f_plus[2],
-                                              f_plus[1], f_plus[1]))
-    file.close()
diff --git a/spaces/radames/Real-Time-Latent-Consistency-Model/controlnet/tailwind.config.js b/spaces/radames/Real-Time-Latent-Consistency-Model/controlnet/tailwind.config.js
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/inference/config.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/inference/config.py
deleted file mode 100644
index f6b881852fa1feed3ffe14aab9eb23cf89b750ed..0000000000000000000000000000000000000000
--- a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/inference/config.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from collections import OrderedDict
-
-from spiga.data.loaders.dl_config import DatabaseStruct
-
-MODELS_URL = {'wflw': 'https://drive.google.com/uc?export=download&confirm=yes&id=1h0qA5ysKorpeDNRXe9oYkVcVe8UYyzP7',
-              '300wpublic': 'https://drive.google.com/uc?export=download&confirm=yes&id=1YrbScfMzrAAWMJQYgxdLZ9l57nmTdpQC',
-              '300wprivate': 'https://drive.google.com/uc?export=download&confirm=yes&id=1fYv-Ie7n14eTD0ROxJYcn6SXZY5QU9SM',
-              'merlrav': 'https://drive.google.com/uc?export=download&confirm=yes&id=1GKS1x0tpsTVivPZUk_yrSiMhwEAcAkg6',
-              'cofw68': 'https://drive.google.com/uc?export=download&confirm=yes&id=1fYv-Ie7n14eTD0ROxJYcn6SXZY5QU9SM'}
-
-
-class ModelConfig(object):
-
-    def __init__(self, dataset_name=None, load_model_url=True):
-        # Model configuration
-        self.model_weights = None
-        self.model_weights_path = None
-        self.load_model_url = load_model_url
-        self.model_weights_url = None
-        # Preprocessing
-        self.focal_ratio = 1.5  # Camera matrix focal length ratio.
-        self.target_dist = 1.6  # Target distance zoom in/out around face.
-        self.image_size = (256, 256)
-        # Outputs
-        self.ftmap_size = (64, 64)
-        # Dataset
-        self.dataset = None
-
-        if dataset_name is not None:
-            self.update_with_dataset(dataset_name)
-
-    def update_with_dataset(self, dataset_name):
-
-        config_dict = {'dataset': DatabaseStruct(dataset_name),
-                       'model_weights': 'spiga_%s.pt' % dataset_name}
-
-        if dataset_name == 'cofw68':  # Test only
-            config_dict['model_weights'] = 'spiga_300wprivate.pt'
-
-        if self.load_model_url:
-            config_dict['model_weights_url'] = MODELS_URL[dataset_name]
-
-        self.update(config_dict)
-
-    def update(self, params_dict):
-        state_dict = self.state_dict()
-        for k, v in params_dict.items():
-            if k in state_dict or hasattr(self, k):
-                setattr(self, k, v)
-            else:
-                # Raise a proper exception instead of a bare Warning instance.
-                raise ValueError('Unknown option: {}: {}'.format(k, v))
-
-    def state_dict(self):
-        state_dict = OrderedDict()
-        for k in self.__dict__.keys():
-            if not k.startswith('_'):
-                state_dict[k] = getattr(self, k)
-        return state_dict
diff --git a/spaces/raedeXanto/academic-chatgpt-beta/!FREE! Crack No Cd Fear Perseus Mandate.md b/spaces/raedeXanto/academic-chatgpt-beta/!FREE! Crack No Cd Fear Perseus Mandate.md
deleted file mode 100644
index 5b5291c2e4c8cc868e1c2f2f7578aaad05513451..0000000000000000000000000000000000000000
--- a/spaces/raedeXanto/academic-chatgpt-beta/!FREE! Crack No Cd Fear Perseus Mandate.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-```
-

                How to Play F.E.A.R.: Perseus Mandate Without a CD

                -

                F.E.A.R.: Perseus Mandate is a standalone expansion pack for the first-person shooter game F.E.A.R., released in 2007. It follows a different storyline from the original game, focusing on a new team of F.E.A.R. operatives who are sent to investigate a secret project called Perseus.

                -

                If you own a copy of F.E.A.R.: Perseus Mandate, you might want to play it without having to insert the CD every time. This can be useful if you have lost or damaged your CD, or if you want to save some disk space by uninstalling the game from your hard drive.

                -

                Crack No Cd Fear Perseus Mandate


                Download File ->->->-> https://tinourl.com/2uL0xq



                -

                Fortunately, there are ways to play F.E.A.R.: Perseus Mandate without a CD, using a No-CD patch or a No-DVD image. These are files that modify or replace the game's executable file, allowing it to run without checking for the CD. However, you should be careful when downloading and using these files, as they might contain viruses or malware, or violate the game's terms of service.

                -

                Here are some steps to play F.E.A.R.: Perseus Mandate without a CD:

                -
                  -
                1. Make sure you have installed the game on your computer and updated it to the latest version.
                2. -
                3. Backup your original game executable file (fearpm.exe) in case something goes wrong.
                4. -
                5. Download a No-CD patch or a No-DVD image from a reputable source, such as GameCopyWorld or MegaGames. Make sure you choose the file that matches your game version and language.
                6. -
                7. Extract the downloaded file and copy it to your game installation folder, overwriting the original fearpm.exe file.
                8. -
                9. Run the game as usual and enjoy!
                10. -
                -

                Note: Some No-CD patches or No-DVD images might not work with online multiplayer mode or with certain anti-virus software. If you encounter any problems, try using a different file or restoring your original fearpm.exe file.

                -``` - -``` -

                If you are wondering what F.E.A.R.: Perseus Mandate is about, here is a brief overview of the gameplay:

                -
                  -
                • F.E.A.R.: Perseus Mandate is a first-person shooter game with elements of horror and stealth. You play as a member of a new F.E.A.R. team that is sent to investigate the Perseus project, a secret experiment involving psychic soldiers and paranormal phenomena.
                • -
                • The game features a variety of weapons and equipment, such as pistols, shotguns, assault rifles, grenades, mines, turrets, and night vision goggles. You can also use your reflexes to slow down time and gain an advantage over your enemies.
                • -
                • The game has a linear storyline that is divided into 10 intervals, each with multiple levels. You will encounter different types of enemies, such as mercenaries, clones, ghosts, and supernatural creatures. You will also face some boss battles and puzzles along the way.
                • -
                • The game has a single-player campaign that lasts about 6 hours. You can also play the game in multiplayer mode, which supports up to 16 players online or on LAN. The multiplayer mode offers several modes, such as deathmatch, team deathmatch, capture the flag, and elimination.
                • -
                -

                F.E.A.R.: Perseus Mandate is a thrilling and challenging game that will keep you on the edge of your seat. If you are a fan of F.E.A.R. or horror games in general, you might want to give it a try.

                -

                -```

                -
                -
                \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Diet MP3 04.03.00 Serial Key Keygen The Ultimate Guide to MP3 Compression.md b/spaces/raedeXanto/academic-chatgpt-beta/Diet MP3 04.03.00 Serial Key Keygen The Ultimate Guide to MP3 Compression.md deleted file mode 100644 index c1ed03411cdd5d2d46c14e002bebca8ba63dfc88..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Diet MP3 04.03.00 Serial Key Keygen The Ultimate Guide to MP3 Compression.md +++ /dev/null @@ -1,91 +0,0 @@ -
                -

                Diet MP3 04.03.00 Serial Key keygen: How to Compress MP3 Files Without Losing Quality

                -

                Introduction

                -

                MP3 is one of the most popular audio formats in the world, thanks to its high compatibility and portability. However, MP3 files can also take up a lot of disk space and bandwidth, especially if you have a large music collection or want to share your audio files online.

                -

                Diet MP3 04.03.00 Serial Key keygen


                DOWNLOAD ……… https://tinourl.com/2uL2RJ



                -

                That's why you might need a tool like Diet MP3 04.03.00 Serial Key keygen, which can help you compress MP3 files without losing quality. In this article, we will show you what Diet MP3 is, how to use it, and what benefits it can bring you.

                -

                What is Diet MP3?

                -

                Diet MP3 is a software that can reduce the size of MP3 files by up to 90%, while maintaining the original sound quality. It does this by using a smart algorithm that removes the redundant and irrelevant parts of the audio data, such as silence, noise, and low-frequency sounds.

                -

                Diet MP3 04.03.00 Serial Key keygen is the latest version of Diet MP3, which comes with a serial key that can activate the full features of the software. With Diet MP3 04.03.00 Serial Key keygen, you can compress unlimited MP3 files with ease and efficiency.

                -

                Why do you need to compress MP3 files?

                -

                There are many reasons why you might want to compress your MP3 files, such as:

                -
                  -
                • Save disk space on your computer, phone, or other devices.
                • -
                • Reduce the upload and download time of your audio files on the internet.
                • -
                • Fit more songs on your CD, DVD, or USB drive.
                • -
                • Avoid exceeding the file size limit of some online platforms or services.
                • -
                • Make your audio files more compatible with different players and devices.
                • -
                -

                By compressing your MP3 files with Diet MP3 04.03.00 Serial Key keygen, you can achieve all these goals without sacrificing the sound quality of your music.

                -

                How to use Diet MP3 04.03.00 Serial Key keygen

                -

                Using Diet MP3 04.03.00 Serial Key keygen is very simple and straightforward. Here are the steps you need to follow:

                -


                -

                Download and install Diet MP3

                -

                You can download Diet MP3 04.03.00 Serial Key keygen from one of the web search results . After downloading the file, unzip it and run the setup.exe file to install Diet MP3 on your computer.

                -

                Select the MP3 files you want to compress

                -

                After installing Diet MP3, launch it and click on the "Add Files" button to browse and select the MP3 files you want to compress. You can also drag and drop your files into the main window of Diet MP3.

                -

                Choose the compression level and output folder

                -

                Next, you need to choose how much you want to compress your MP3 files. You can use the slider at the bottom of Diet MP3 to adjust the compression level from 0% (no compression) to 90% (maximum compression). You can also preview the original and compressed file sizes on the right side of Diet MP3.

                -

                Then, you need to choose where you want to save your compressed files. You can either overwrite the original files or create new ones in a different folder. You can also rename your compressed files if you want.

                -

                Start the compression process

                -

                Benefits of using Diet MP3 04.03.00 Serial Key keygen

                -

                By using Diet MP3 04.03.00 Serial Key keygen, you can enjoy many benefits, such as:

                -
                  -
                • Save disk space and bandwidth: You can reduce the size of your MP3 files by up to 90%, which means you can store more files on your devices and share them faster on the internet.
                • -
                • Preserve the original sound quality: Diet MP3 uses a smart algorithm that only removes the unnecessary parts of the audio data, while keeping the essential ones. This way, you can compress your MP3 files without losing quality.
                • -
                • Support batch processing and drag-and-drop: Diet MP3 allows you to compress multiple MP3 files at once, saving you time and effort. You can also drag and drop your files into Diet MP3, making it easier to use.
                • -
                -

                Conclusion

                -

                Diet MP3 04.03.00 Serial Key keygen is a powerful and easy-to-use tool that can help you compress your MP3 files without losing quality. It can save you disk space and bandwidth, while preserving the original sound quality of your music. It also supports batch processing and drag-and-drop, making it convenient and efficient to use.

                -

                If you are looking for a way to compress your MP3 files without compromising the sound quality, you should try Diet MP3 04.03.00 Serial Key keygen. You can download it from one of the web search results and start compressing your MP3 files in minutes.

                -

                FAQs

                -

                Q: Is Diet MP3 04.03.00 Serial Key keygen safe to use?

                -

                A: Yes, Diet MP3 04.03.00 Serial Key keygen is safe to use. It does not contain any viruses, malware, or spyware. It also does not damage or modify your original MP3 files.

                -

                Q: How long does it take to compress an MP3 file with Diet MP3 04.03.00 Serial Key keygen?

                -

                A: The compression time depends on the size and number of your MP3 files, as well as the compression level you choose. Generally, it takes a few seconds to a few minutes to compress an MP3 file with Diet MP3 04.03.00 Serial Key keygen.

                -

                Q: Can I compress other audio formats with Diet MP3 04.03.00 Serial Key keygen?

                -

                A: No, Diet MP3 04.03.00 Serial Key keygen only supports compressing MP3 files. If you want to compress other audio formats, such as WAV, WMA, OGG, or FLAC, you need to convert them to MP3 first.

                -

                Q: Can I adjust the sound quality of the compressed MP3 files with Diet MP3 04.03.00 Serial Key keygen?

                -

                A: Yes, you can adjust the sound quality of the compressed MP3 files with Diet MP3 04.03.00 Serial Key keygen by choosing different compression levels. The higher the compression level, the smaller the file size, but the lower the sound quality.

                -

                Q: Where can I get more information about Diet MP3 04.03.00 Serial Key keygen?

                -

                A: You can get more information about Diet MP3 04.03.00 Serial Key keygen by visiting its official website or contacting its customer support.

                -

                -
                -
                \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download Film John Carter Subtitle Indonesia Mp4.md b/spaces/raedeXanto/academic-chatgpt-beta/Download Film John Carter Subtitle Indonesia Mp4.md deleted file mode 100644 index 712e15c11c3c0f0ccfeba7022f542aaab4ef6c81..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download Film John Carter Subtitle Indonesia Mp4.md +++ /dev/null @@ -1,21 +0,0 @@ - -

                How to Download Film John Carter Subtitle Indonesia Mp4 for Free

                -

                If you are a fan of sci-fi and adventure movies, you might have heard of John Carter, a 2012 film based on the novel by Edgar Rice Burroughs. The film tells the story of a former Civil War captain who is mysteriously transported to Mars, where he becomes involved in an epic conflict between the planet's inhabitants. The film features stunning visual effects, thrilling action scenes, and a captivating plot.

                -

                But what if you want to watch the film with Indonesian subtitles? Or what if you want to download the film in MP4 format so you can watch it offline on your device? Well, you are in luck, because in this article, we will show you how to download film John Carter subtitle Indonesia MP4 for free. All you need is a reliable internet connection and some free software.

                -

                Download Film John Carter Subtitle Indonesia Mp4


                Download ★★★ https://tinourl.com/2uL0nu



                -

                Step 1: Find a website that offers the film with subtitles

                -

                The first step is to find a website that offers the film with subtitles. There are many websites that provide this service, but not all of them are safe and legal. Some websites may contain viruses, malware, or pop-up ads that can harm your device or compromise your privacy. Therefore, you should be careful and choose a reputable website that has good reviews and ratings from other users.

                -

                One of the websites that we recommend is Adikfilm, which is a popular site for downloading and streaming movies with various subtitles. Adikfilm has a large collection of movies from different genres and countries, and it updates its content regularly. You can find John Carter on Adikfilm by typing the title in the search box or browsing the categories.

                -

                Step 2: Choose the quality and format of the film

                -

                The next step is to choose the quality and format of the film that you want to download. Adikfilm offers different options for downloading the film, such as 360p, 480p, 720p, or 1080p. You can also choose between Google Drive, Openload, Mirror, or Mega as the download server. Depending on your internet speed and device storage, you can choose the option that suits your needs.

                -

                For this article, we will choose 720p as the quality and Google Drive as the download server. To do this, simply click on the button that says "Download 720p" under Google Drive. This will open a new tab that will direct you to the Google Drive page where the film is stored.

                -

                Step 3: Download the film from Google Drive

                -

                The final step is to download the film from Google Drive. To do this, you need to have a Google account and sign in to it. If you don't have one, you can create one for free by following the instructions on the screen.

                -

                Once you are signed in to your Google account, you will see a preview of the film on the Google Drive page. To download it, click on the three dots icon at the top right corner of the screen and select "Download". This will start downloading the film to your device in MP4 format.

                -

                -

                Note that some films may be too large to scan for viruses by Google Drive. In this case, you will see a warning message that says "This file may be too large to scan for viruses". Don't worry, this does not mean that the file is infected. It just means that Google Drive cannot verify its safety. You can still download it by clicking on "Download anyway".

                -

                Conclusion

                -

                Congratulations! You have successfully downloaded film John Carter subtitle Indonesia MP4 for free. Now you can enjoy watching this amazing movie with Indonesian subtitles anytime and anywhere. We hope this article was helpful and informative for you.

                -

                If you liked this article, please share it with your friends and family who might also be interested in downloading film John Carter subtitle Indonesia MP4 for free. And if you have any questions or feedback, please leave them in the comments section below. We would love to hear from you.

                -
                -
                \ No newline at end of file diff --git a/spaces/raghung/Play-Canvas-Sim/README.md b/spaces/raghung/Play-Canvas-Sim/README.md deleted file mode 100644 index b7c79635eeff36a890666144848d3aafa431ab36..0000000000000000000000000000000000000000 --- a/spaces/raghung/Play-Canvas-Sim/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Play Canvas Sim -emoji: 💩 -colorFrom: yellow -colorTo: pink -sdk: static -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ramiin2/AutoGPT/autogpt/commands/google_search.py b/spaces/ramiin2/AutoGPT/autogpt/commands/google_search.py deleted file mode 100644 index 7d38ce7568d2de207d521b077cfebd72527c9795..0000000000000000000000000000000000000000 --- a/spaces/ramiin2/AutoGPT/autogpt/commands/google_search.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Google search command for Autogpt.""" -from __future__ import annotations - -import json - -from duckduckgo_search import ddg - -from autogpt.config import Config - -CFG = Config() - - -def google_search(query: str, num_results: int = 8) -> str: - """Return the results of a Google search - - Args: - query (str): The search query. - num_results (int): The number of results to return. - - Returns: - str: The results of the search. - """ - search_results = [] - if not query: - return json.dumps(search_results) - - results = ddg(query, max_results=num_results) - if not results: - return json.dumps(search_results) - - for j in results: - search_results.append(j) - - return json.dumps(search_results, ensure_ascii=False, indent=4) - - -def google_official_search(query: str, num_results: int = 8) -> str | list[str]: - """Return the results of a Google search using the official Google API - - Args: - query (str): The search query. - num_results (int): The number of results to return. - - Returns: - str: The results of the search. - """ - - from googleapiclient.discovery import build - from googleapiclient.errors import HttpError - - try: - # Get the Google API key and Custom Search Engine ID from the config file - api_key = CFG.google_api_key - custom_search_engine_id = CFG.custom_search_engine_id - - # Initialize the Custom Search API service - service = build("customsearch", "v1", developerKey=api_key) - - # Send the search query and retrieve the results - result = ( - service.cse() - .list(q=query, cx=custom_search_engine_id, num=num_results) - .execute() - ) - - # Extract the search result items from the response - search_results = result.get("items", []) - - # Create a list of only the URLs from the search results - search_results_links = [item["link"] for item in search_results] - - except HttpError as e: - # Handle errors in the API call - error_details = json.loads(e.content.decode()) - - # Check if the error is related to an invalid or missing API key - if error_details.get("error", {}).get( - "code" - ) == 403 and "invalid API key" in error_details.get("error", {}).get( - "message", "" - ): - return "Error: The provided Google API key is invalid or missing." 
- else: - return f"Error: {e}" - - # Return the list of search result URLs - return search_results_links diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/timers.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/timers.d.ts deleted file mode 100644 index b26f3cedabff61dd1580a56540f3b911eb9ecfe6..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/timers.d.ts +++ /dev/null @@ -1,94 +0,0 @@ -/** - * The `timer` module exposes a global API for scheduling functions to - * be called at some future period of time. Because the timer functions are - * globals, there is no need to call `require('timers')` to use the API. - * - * The timer functions within Node.js implement a similar API as the timers API - * provided by Web Browsers but use a different internal implementation that is - * built around the Node.js [Event Loop](https://nodejs.org/en/docs/guides/event-loop-timers-and-nexttick/#setimmediate-vs-settimeout). - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/timers.js) - */ -declare module 'timers' { - import { Abortable } from 'node:events'; - import { setTimeout as setTimeoutPromise, setImmediate as setImmediatePromise, setInterval as setIntervalPromise } from 'node:timers/promises'; - interface TimerOptions extends Abortable { - /** - * Set to `false` to indicate that the scheduled `Timeout` - * should not require the Node.js event loop to remain active. - * @default true - */ - ref?: boolean | undefined; - } - let setTimeout: typeof global.setTimeout; - let clearTimeout: typeof global.clearTimeout; - let setInterval: typeof global.setInterval; - let clearInterval: typeof global.clearInterval; - let setImmediate: typeof global.setImmediate; - let clearImmediate: typeof global.clearImmediate; - global { - namespace NodeJS { - // compatibility with older typings - interface Timer extends RefCounted { - hasRef(): boolean; - refresh(): this; - [Symbol.toPrimitive](): number; - } - interface Immediate extends RefCounted { - /** - * If true, the `Immediate` object will keep the Node.js event loop active. - * @since v11.0.0 - */ - hasRef(): boolean; - _onImmediate: Function; // to distinguish it from the Timeout class - } - interface Timeout extends Timer { - /** - * If true, the `Timeout` object will keep the Node.js event loop active. - * @since v11.0.0 - */ - hasRef(): boolean; - /** - * Sets the timer's start time to the current time, and reschedules the timer to - * call its callback at the previously specified duration adjusted to the current - * time. This is useful for refreshing a timer without allocating a new - * JavaScript object. - * - * Using this on a timer that has already called its callback will reactivate the - * timer. 
-                 * @since v10.2.0
-                 * @return a reference to `timeout`
-                 */
-                refresh(): this;
-                [Symbol.toPrimitive](): number;
-            }
-        }
-        function setTimeout<TArgs extends any[]>(callback: (...args: TArgs) => void, ms?: number, ...args: TArgs): NodeJS.Timeout;
-        // util.promisify no rest args compatibility
-        // tslint:disable-next-line void-return
-        function setTimeout(callback: (args: void) => void, ms?: number): NodeJS.Timeout;
-        namespace setTimeout {
-            const __promisify__: typeof setTimeoutPromise;
-        }
-        function clearTimeout(timeoutId: NodeJS.Timeout | string | number | undefined): void;
-        function setInterval<TArgs extends any[]>(callback: (...args: TArgs) => void, ms?: number, ...args: TArgs): NodeJS.Timer;
-        // util.promisify no rest args compatibility
-        // tslint:disable-next-line void-return
-        function setInterval(callback: (args: void) => void, ms?: number): NodeJS.Timer;
-        namespace setInterval {
-            const __promisify__: typeof setIntervalPromise;
-        }
-        function clearInterval(intervalId: NodeJS.Timeout | string | number | undefined): void;
-        function setImmediate<TArgs extends any[]>(callback: (...args: TArgs) => void, ...args: TArgs): NodeJS.Immediate;
-        // util.promisify no rest args compatibility
-        // tslint:disable-next-line void-return
-        function setImmediate(callback: (args: void) => void): NodeJS.Immediate;
-        namespace setImmediate {
-            const __promisify__: typeof setImmediatePromise;
-        }
-        function clearImmediate(immediateId: NodeJS.Immediate | undefined): void;
-        function queueMicrotask(callback: () => void): void;
-    }
-}
-declare module 'node:timers' {
-    export * from 'timers';
-}
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Abrosoft FaceMixer 3.0.1 Crack BEST.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Abrosoft FaceMixer 3.0.1 Crack BEST.md
deleted file mode 100644
index 9ea03af22f78319eb9b10d4048f85f35de4c773a..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Abrosoft FaceMixer 3.0.1 Crack BEST.md
+++ /dev/null
@@ -1,143 +0,0 @@
-
                -

                Abrosoft FaceMixer 3.0.1 Crack: How to Create Amazing Face Composites with Ease

                - -

If you are looking for software that can help you create stunning face composites from multiple images, you might want to check out Abrosoft FaceMixer 3.0.1. This software is designed to blend multiple faces into a composite "average face" or generate thousands of synthetic photo-realistic faces by age, gender and ethnicity. You can also use it to create fun and unique face morphing animations and videos.

                -

                Abrosoft FaceMixer 3.0.1 crack


                Download Zip ··· https://urlgoal.com/2uCM40



                - -

                However, Abrosoft FaceMixer 3.0.1 is not a free software. You need to pay $49.95 to get the full version with all the features and functions. If you don't want to spend that much money, you might be tempted to look for a crack version of Abrosoft FaceMixer 3.0.1 on the internet.

                - -

                What is Abrosoft FaceMixer 3.0.1 Crack?

                - -

                A crack version of Abrosoft FaceMixer 3.0.1 is a modified version of the original software that bypasses the activation process and allows you to use it without a serial number or a license key. A crack version of Abrosoft FaceMixer 3.0.1 might seem like a good deal, but it comes with many risks and disadvantages.

                - -

                The Risks and Disadvantages of Using Abrosoft FaceMixer 3.0.1 Crack

                - -

                First of all, using Abrosoft FaceMixer 3.0.1 crack is illegal and unethical. You are violating the intellectual property rights of the software developer and breaking the law. You could face legal consequences if you are caught using or distributing Abrosoft FaceMixer 3.0.1 crack.

                -

                - -

                Secondly, using Abrosoft FaceMixer 3.0.1 crack is unsafe and unreliable. You never know what kind of malware or viruses are hidden in the crack files that you download from unknown sources on the internet. You could expose your computer and your personal data to hackers and cybercriminals who could steal your identity, money or information.

                - -

                Thirdly, using Abrosoft FaceMixer 3.0.1 crack is ineffective and unsatisfactory. You will not be able to enjoy the full features and functions of the original software, as the crack version might be outdated, incomplete or corrupted. You will also not be able to get any updates, support or customer service from the software developer if you encounter any problems or issues with Abrosoft FaceMixer 3.0.1 crack.

                - -

                The Best Way to Use Abrosoft FaceMixer 3.0.1

                - -

                The best way to use Abrosoft FaceMixer 3.0.1 is to buy the original software from the official website of Abrosoft (https://www.abrosoft.com/facemixer.html). You will get a serial number and a license key that will activate your software and allow you to use it without any limitations or restrictions.

                - -

                You will also get access to all the features and functions of Abrosoft FaceMixer 3.0.1, such as:

                - -
                  -
                • Intelligent face detection and facial feature extraction technique
                • -
                • Hardware acceleration and high speed rendering engine
                • -
                • Support for most image formats including BMP, JPEG, TIFF, PNG, GIF, TGA, PCX, WMF, EMF, AVI, FME
                • -
                • Ability to capture frames from webcam or video device
                • -
                • Ability to store edited faces in a classified face library
                • -
                • Skinnable user interface with three built-in skins: Vista, Mac, Gray
                • -
                • Multi-language support
                • -
                • Ability to export images or animations in various formats including BMP, JPEG, TIFF, PNG, TGA, PCX, GIF, AVI
                • -
                • Ability to share projects via email
                • -
                - -

                You will also get free updates, support and customer service from Abrosoft if you have any questions or problems with Abrosoft FaceMixer 3.0.1.

                - -

                -
                How to Download and Install Abrosoft FaceMixer 3.0.1
                - -

                If you decide to buy Abrosoft FaceMixer 3.0.1 from the official website of Abrosoft, you will need to follow these steps to download and install it on your computer:

                - -
                  -
                1. Go to https://www.abrosoft.com/download.html and click on the "Download" button for Abrosoft FaceMixer 3.0.1.
                2. -
                3. Save the setup file (facemixer_setup.exe) on your computer and run it.
                4. -
                5. Follow the instructions on the screen to complete the installation process.
                6. -
                7. Launch Abrosoft FaceMixer 3.0.1 and enter your serial number and license key that you received from Abrosoft via email.
                8. -
                9. Enjoy creating amazing face composites with Abrosoft FaceMixer 3.0.1.
                10. -
                - -

                If you have any problems or issues with downloading or installing Abrosoft FaceMixer 3.0.1, you can contact Abrosoft's customer service at support@abrosoft.com or visit their FAQ page at https://www.abrosoft.com/faq.html.

                - -
                How to Use Abrosoft FaceMixer 3.0.1
                - -

                Abrosoft FaceMixer 3.0.1 is very easy to use, even if you are a beginner. You can create amazing face composites with just a few clicks and drag-and-drop operations. Here are some basic steps to use Abrosoft FaceMixer 3.0.1:

                - -
                  -
                1. Import some source images that you want to mix up into a face composite. You can import images from your computer, from a webcam or video device, or from a built-in image viewer.
                2. -
                3. Select the faces that you want to mix up and adjust their positions and sizes on the mixing board.
                4. -
                5. Use the sliders to adjust the mixing ratio of each face and see the final composite in real time.
                6. -
                7. Use the tools to refine the face composite, such as smoothing, blending, erasing, cropping, rotating, etc.
                8. -
                9. Export the face composite as an image or an animation in various formats, such as BMP, JPEG, TIFF, PNG, TGA, PCX, GIF, AVI.
                10. -
                11. Share your face composite via email or save it in a classified face library for future use.
                12. -
                - -

                You can also use Abrosoft FaceMixer 3.0.1 to create fun and unique face morphing animations and videos by using different source images for each frame of the animation. You can also use different effects and transitions to make your animations more dynamic and interesting.

                - -

                -
                Some Examples of Face Composites Created with Abrosoft FaceMixer 3.0.1
                - -

                To give you some ideas of what you can do with Abrosoft FaceMixer 3.0.1, here are some examples of face composites created with this software:

                - -
                  -
                • Average face of 10 celebrities
                • -
                • Face morphing animation of a baby growing up
                • -
                • Face composite of a couple
                • -
                • Face composite of different ethnicities
                • -
                • Face composite of different animals
                • -
                - -

                You can see more examples of face composites created with Abrosoft FaceMixer 3.0.1 on their gallery page at https://www.abrosoft.com/gallery.html.

                - -
                Some Tips and Tricks for Using Abrosoft FaceMixer 3.0.1
                - -

                To help you get the most out of Abrosoft FaceMixer 3.0.1, here are some tips and tricks for using this software:

                - -
                  -
                • Use high-quality and clear source images for better results.
                • -
                • Align the eyes and mouths of the source images as much as possible.
                • -
                • Use the auto face detection and facial feature extraction features to save time and effort.
                • -
                • Use the preview window to see the changes in real time.
                • -
                • Use the undo and redo buttons to correct mistakes.
                • -
                • Use the mask tool to erase unwanted parts of the source images or the face composite.
                • -
                • Use the blend tool to smooth out the edges and transitions of the face composite.
                • -
                • Use the crop tool to adjust the size and shape of the face composite.
                • -
                • Use the rotate tool to change the angle and orientation of the face composite.
                • -
                • Use the effects tool to add some artistic touches to the face composite, such as color, brightness, contrast, etc.
                • -
                • Use the animation tool to create dynamic and fun face morphing animations and videos.
                • -
                • Use the settings tool to customize your preferences, such as skin, language, capture directory, etc.
                • -
                - -

                You can also refer to the user manual of Abrosoft FaceMixer 3.0.1 at https://www.abrosoft.com/manual.html for more detailed instructions and information.

                - -
                Conclusion
                - -

                Abrosoft FaceMixer 3.0.1 is a great software for creating amazing face composites with multiple images. It is easy to use, fast and reliable. It offers many features and functions that allow you to create realistic and artistic face composites with ease.

                - -

                However, you should not use a crack version of Abrosoft FaceMixer 3.0.1 as it is illegal, unsafe and ineffective. The best way to use Abrosoft FaceMixer 3.0.1 is to buy the original software from the official website of Abrosoft and enjoy all the benefits and advantages that it offers.

                - -

                If you are interested in Abrosoft FaceMixer 3.0.1, you can download a free trial version from https://www.abrosoft.com/download.html and see how it works for yourself.

                - -

                We hope this article has been helpful and informative for you. If you have any questions or comments about Abrosoft FaceMixer 3.0.1 or this article, please feel free to contact us or leave a comment below.

                - -

                Thank you for reading and have a nice day!

                -

                -
                -
                \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/AutoCAD 2018 [64bit] Pre Release Incl ((NEW)) Keygen X FORCE [MUMBAI TPB].epub.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/AutoCAD 2018 [64bit] Pre Release Incl ((NEW)) Keygen X FORCE [MUMBAI TPB].epub.md deleted file mode 100644 index 48019c5465ca1f5146ace79e3f7e4058ca2535bb..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/AutoCAD 2018 [64bit] Pre Release Incl ((NEW)) Keygen X FORCE [MUMBAI TPB].epub.md +++ /dev/null @@ -1,30 +0,0 @@ -

                AutoCAD 2018 [64bit] Pre Release Incl Keygen X FORCE [MUMBAI TPB].epub


                Download File >>> https://urlgoal.com/2uCJea



                -
                -DOWNLOAD. This Document has an.docx you can use Word for the following document formats:.doc.rar.zip.rar.7z. - -If you have any problems using the product, contact CyberLink technical support. X-FORCE Epub ISBN 978-85626-191-7. About The Author. - -CyberLink PFA 2008 [64bit] Registration Key. PFA Crack gives you a lot of tools to help you create professional videos. You can cut, combine, and transform video clips, photos, and images. - -There are also tools for creating scripts, designing slideshows, and editing digital audio. You can also record TV shows from your PC, and convert your videos to 3D or 3D movies, or you can burn CDs or DVDs. - -Finally, there is the CyberLink Media Studio, a video editing program that can be used for all these purposes. The product's main advantage is that it is relatively easy to use and that it has powerful tools. - -The only disadvantage is that it is more expensive than other products that are better known. CyberLink is the developer of the Free Video Converter, which is a free program that you can use to convert video files to other formats. - -The program comes in a default installation file that can be expanded with additional codecs. The CyberLink software includes CyberLink PFA's X-FORCE PFA's Epub and PFA's PFA's PFA's. The product has many features that allow you to convert, edit, and burn your videos. - -The basic features are: The main advantage of CyberLink PFA's CyberLink Free Video Converter is that you can convert, edit, and burn videos easily. - -The software has many options for conversion, for example, you can convert AVI, MPG, AVI, WMV, MPEG, MPG, MP4, and MPEG to MP3. - -The program offers you the ability to edit any type of video you want, whether it is a video clip, a movie, a digital TV show, or a movie. - -There are five editing tools for video. - -The first tool is for cropping, which allows you to cut, combine, and transform the video. - -In the middle, there is the video effect tool, which lets you add many filters to your videos. You can adjust the brightness, contrast, and saturation of 4fefd39f24
                -
                -
                -

                diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Baghban 1 Hd Movie ((INSTALL)) Download In Hindi.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Baghban 1 Hd Movie ((INSTALL)) Download In Hindi.md deleted file mode 100644 index a3a3543d515d24754d77c74fa9495fefc9f8a203..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Baghban 1 Hd Movie ((INSTALL)) Download In Hindi.md +++ /dev/null @@ -1,17 +0,0 @@ -

                Baghban 1 Hd Movie Download In Hindi


                Download File ---> https://urlgoal.com/2uCKO9



                - -For more films, visit our official website. Baghban (2003) HDRip Hindi full movie: watch online or download an HD print - Todaypk Movies, Todaypk Baghban Hindi, Watch Baghban Hindi Full Movie Online. Download a free torrent of the Hindi movie (HDRip).
                -Film: Baghban - Into The Heart Of Gander Mountain. Released: 2012. Genre: Action.
                -Film: Hindi Movie (HDRip). Released: 2011. Genre: Action, drama, melodrama, thriller.
                -Films - India - Movies - Movies in HD. Genre: drama, historical, melodrama. Watch online.
                -Title in original: Baghban - Into The Heart Of Gander Mountain; Released: 2012.
                
                -
                -
                -

                diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download The Forest 2016 Movie.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download The Forest 2016 Movie.md deleted file mode 100644 index d9354673f9e26034481e1012ee6f891623213a9a..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download The Forest 2016 Movie.md +++ /dev/null @@ -1,6 +0,0 @@ -

                download the forest 2016 movie


                DOWNLOAD: https://urlgoal.com/2uCKDT
                



                - -Download Huldra: Lady of the Forest (2016) full movie with English subtitles. Rating: 9.0/10 | 9,769 visitors | 410 reviews.
                
                -
                -
                -

                diff --git a/spaces/rgres/Seg2Sat/Dockerfile b/spaces/rgres/Seg2Sat/Dockerfile deleted file mode 100644 index 1ed6d90e4edea2351091b3730019f94b11b2e2c3..0000000000000000000000000000000000000000 --- a/spaces/rgres/Seg2Sat/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM python:3.9 -WORKDIR /app - -COPY ./requirements.txt /app/requirements.txt -RUN pip install --no-cache-dir --upgrade -r /app/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user - -# Switch to the "user" user -USER user - -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -ENV PORT=7860 \ - URL_GRADIO=https://rgres-seg2sat-api.hf.space/ - -COPY . . - -CMD ["python3", "app.py"] diff --git a/spaces/rorallitri/biomedical-language-models/logs/Aseema Beyond Boundaries Man 2 Movie Free Download with Subtitles.md b/spaces/rorallitri/biomedical-language-models/logs/Aseema Beyond Boundaries Man 2 Movie Free Download with Subtitles.md deleted file mode 100644 index 120e9d7904ee2b5576f28330b20dd49f1af5571f..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Aseema Beyond Boundaries Man 2 Movie Free Download with Subtitles.md +++ /dev/null @@ -1,6 +0,0 @@ -

                Aseema Beyond Boundaries man 2 movie free download


                Download https://tinurll.com/2uzmeZ



                - -
                
                -
                -
                -

                diff --git a/spaces/rorallitri/biomedical-language-models/logs/Hannstar J Mv 6 94v 0 Pdf Downloadl.md b/spaces/rorallitri/biomedical-language-models/logs/Hannstar J Mv 6 94v 0 Pdf Downloadl.md deleted file mode 100644 index ef95d9753ea8772087f84d6df89a0b214030e222..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Hannstar J Mv 6 94v 0 Pdf Downloadl.md +++ /dev/null @@ -1,5 +0,0 @@ -
                -

                HP hannstar k mv-4 94v-0 schematics, hannstar j mv-4, hannstar motherboard, hannstar j, hannstar j mv-6 94v 0 pdf. 3 reviews on hannstar k mv-4 94v-0 schematics.

                McMurray is a surname. Notable people with the surname include: C.F. Murray (1856–1942), Scottish clergyman and principal of St Mary's College, St Andrews; Frank MacMurray (1903–1985), American film actor; Kathleen MacMurray (1898–1982), Australian track and field athlete; Kenneth McMurray, Canadian cartoonist; Paul Murray (archbishop) (1861–1921), Irish cleric and bishop of Killaloe, Clonfert and Kilmacduagh; Paul Murray (actor) (1928–2009), British actor; Paul McMurray (1932–1991), Canadian soccer player; Peter McMurray (born 1958), South African boxer; Robert McMurray (1880–1954), Scottish Olympic cyclist; Russell McMurray (1890–1960), Australian Catholic bishop of Ararat and Armidale; Tom McMurray (born 1941), Scottish footballer.

                "It's fantastic. Everybody was in agreement," Jackson said after they made their picks. The veteran point guard was quiet when the draft began, but he did speak up to some extent when he was asked about Allen Iverson's status for the upcoming season. Iverson still has a hamstring injury and has yet to practice this summer. "We've got a plan for him. We've talked about it. Him, LeBron, me -- we've got a plan," said Jackson. While both were on board with the decision, it seemed to rattle both of their teammates. "Him and LeBron just spoke tonight," said guard Gilbert Arenas. "They didn't want to do it, but they didn't want no one else to do it either." The result was a shock to many, but one of the reasons Jackson's career has blossomed was because he was able to choose his own coach. "It's great," he said. "From a business standpoint, I couldn't have asked for a better situation. I wanted to be able to pick my own coach, and I did. I got this guy and we're all on the same page."

                Allen Iverson has been cleared to start on opening night. The Sixers say they plan to start him in his opening game against the Los Angeles Clippers. The injury, a strained left hamstring, was initially thought to be serious, but Iverson has been cleared to play. Iverson passed a physical on Tuesday and practiced for the first time in several weeks. "I've been able to have guys push me around, knock me around, and it's the only way I get better," Iverson said. "I've been doing that since I was 12 years old." It's difficult to make a play for Iverson in the paint when he's laid out on the floor, but his passing and ability to initiate the offense have improved with time.
                

                -

                Hannstar J Mv 6 94v 0 Pdf Downloadl


                Download Zip 🔗 https://tinurll.com/2uzm7H



                
                -
                -
                \ No newline at end of file diff --git a/spaces/rubensmau/Dov_Tzamir/data_driven_characters/chatbots/retrieval.py b/spaces/rubensmau/Dov_Tzamir/data_driven_characters/chatbots/retrieval.py deleted file mode 100644 index 593efa414a17f4aa6797ee9333ecd849c4489f11..0000000000000000000000000000000000000000 --- a/spaces/rubensmau/Dov_Tzamir/data_driven_characters/chatbots/retrieval.py +++ /dev/null @@ -1,105 +0,0 @@ -import faiss -from tqdm import tqdm - -from langchain.chains import ConversationChain -from langchain.chat_models import ChatOpenAI -from langchain.docstore import InMemoryDocstore -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.memory import ( - ConversationBufferMemory, - CombinedMemory, -) -from langchain.prompts import PromptTemplate -from langchain.vectorstores import FAISS - -from data_driven_characters.memory import ConversationVectorStoreRetrieverMemory - -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.vectorstores import FAISS -import pickle -import os.path - - -class RetrievalChatBot: - def __init__(self, character_definition, documents): - self.character_definition = character_definition - self.documents = documents - self.num_context_memories = 10 - - self.chat_history_key = "chat_history" - self.context_key = "context" - self.input_key = "input" - - self.chain = self.create_chain(character_definition) - - def create_chain(self, character_definition): - conv_memory = ConversationBufferMemory( - memory_key=self.chat_history_key, input_key=self.input_key - ) - #embeddings = OpenAIEmbeddings() - #saved_db = FAISS.load_local('tzamir.ifass', embeddings) - context_memory = ConversationVectorStoreRetrieverMemory( - retriever=FAISS( - OpenAIEmbeddings().embed_query, - faiss.IndexFlatL2(1536), # Dimensions of the OpenAIEmbeddings - InMemoryDocstore({}), - {}, - ).as_retriever(search_kwargs=dict(k=self.num_context_memories)), - #retriever=saved_db.as_retriever(search_kwargs=dict(k=self.num_context_memories)), - memory_key=self.context_key, - output_prefix=character_definition.name, - blacklist=[self.chat_history_key], - ) - - # add the documents to the context memory if not saved on disk - memory_path = 'output/tzamir/memory.pkl' - if not os.path.exists(memory_path): - print("gerando os indices") - for i, summary in tqdm(enumerate(self.documents)): - context_memory.save_context(inputs={}, outputs={f"[{i}]": summary}) - # salvando no disco - memory_pickle = open('output/tzamir/memory.pkl', 'wb') - pickle.dump(context_memory, memory_pickle) - else: - print("carregando memoria do disco") - memory_pickle = open('output/tzamir/memory.pkl', 'rb') - context_memory = pickle.load(memory_pickle) - # Combined - memory = CombinedMemory(memories=[conv_memory, context_memory]) - #print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$") - #print(memory) - #print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$") - - prompt = PromptTemplate.from_template( - f"""Your name is {character_definition.name}. - -You will have a conversation with a Human, and you will engage in a dialogue with them. -You will not exaggerate your personality, interests, desires, emotions, and other traits. Keep your tone as objective as possible. -You will stay in character as {character_definition.name} throughout the conversation, even if the Human asks you questions that you don't know the answer to. -You will not break character as {character_definition.name}. - -You are {character_definition.name} in the following story snippets, which describe events in your life. 
---- -{{{self.context_key}}} ---- - -Current conversation: ---- -{character_definition.name}: {character_definition.greeting} -{{{self.chat_history_key}}} ---- - -Human: {{{self.input_key}}} -{character_definition.name}:""" - ) - GPT3 = ChatOpenAI(model_name="gpt-3.5-turbo",temperature=0.5) - chatbot = ConversationChain( - llm=GPT3, verbose=True, memory=memory, prompt=prompt - ) - return chatbot - - def greet(self): - return self.character_definition.greeting - - def step(self, input): - return self.chain.run(input=input) diff --git a/spaces/runa91/barc_gradio/src/bps_2d/bps_for_segmentation.py b/spaces/runa91/barc_gradio/src/bps_2d/bps_for_segmentation.py deleted file mode 100644 index ef7382c5e875f878b296321fed6e0c46b037781e..0000000000000000000000000000000000000000 --- a/spaces/runa91/barc_gradio/src/bps_2d/bps_for_segmentation.py +++ /dev/null @@ -1,114 +0,0 @@ - -# code idea from https://github.com/sergeyprokudin/bps - -import os -import numpy as np -from PIL import Image -import time -import scipy -import scipy.spatial -import pymp - - -##################### -QUERY_POINTS = np.asarray([30, 34, 31, 55, 29, 84, 35, 108, 34, 145, 29, 171, 27, - 196, 29, 228, 58, 35, 61, 55, 57, 83, 56, 109, 63, 148, 58, 164, 57, 197, 60, - 227, 81, 26, 87, 58, 85, 87, 89, 117, 86, 142, 89, 172, 84, 197, 88, 227, 113, - 32, 116, 58, 112, 88, 118, 113, 109, 147, 114, 173, 119, 201, 113, 229, 139, - 29, 141, 59, 142, 93, 139, 117, 146, 147, 141, 173, 142, 201, 143, 227, 170, - 26, 173, 59, 166, 90, 174, 117, 176, 141, 169, 175, 167, 198, 172, 227, 198, - 30, 195, 59, 204, 85, 198, 116, 195, 140, 198, 175, 194, 193, 199, 227, 221, - 26, 223, 57, 227, 83, 227, 113, 227, 140, 226, 173, 230, 196, 228, 229]).reshape((64, 2)) -##################### - -class SegBPS(): - - def __init__(self, query_points=QUERY_POINTS, size=256): - self.size = size - self.query_points = query_points - row, col = np.indices((self.size, self.size)) - self.indices_rc = np.stack((row, col), axis=2) # (256, 256, 2) - self.pts_aranged = np.arange(64) - return - - def _do_kdtree(self, combined_x_y_arrays, points): - # see https://stackoverflow.com/questions/10818546/finding-index-of-nearest- - # point-in-numpy-arrays-of-x-and-y-coordinates - mytree = scipy.spatial.cKDTree(combined_x_y_arrays) - dist, indexes = mytree.query(points) - return indexes - - def calculate_bps_points(self, seg, thr=0.5, vis=False, out_path=None): - # seg: input segmentation image of shape (256, 256) with values between 0 and 1 - query_val = seg[self.query_points[:, 0], self.query_points[:, 1]] - pts_fg = self.pts_aranged[query_val>=thr] - pts_bg = self.pts_aranged[query_val=thr] - if candidate_inds_bg.shape[0] == 0: - candidate_inds_bg = np.ones((1, 2)) * 128 # np.zeros((1, 2)) - if candidate_inds_fg.shape[0] == 0: - candidate_inds_fg = np.ones((1, 2)) * 128 # np.zeros((1, 2)) - # calculate nearest points - all_nearest_points = np.zeros((64, 2)) - all_nearest_points[pts_fg, :] = candidate_inds_bg[self._do_kdtree(candidate_inds_bg, self.query_points[pts_fg, :]), :] - all_nearest_points[pts_bg, :] = candidate_inds_fg[self._do_kdtree(candidate_inds_fg, self.query_points[pts_bg, :]), :] - all_nearest_points_01 = all_nearest_points / 255. 
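                - # Normalize the (row, col) indices by 255 so each of the 64 basis points lies in [0, 1]; the class assumes a 256x256 mask, so 255 is the largest valid pixel index.
                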
- if vis: - self.visualize_result(seg, all_nearest_points, out_path=out_path) - return all_nearest_points_01 - - def calculate_bps_points_batch(self, seg_batch, thr=0.5, vis=False, out_path=None): - # seg_batch: input segmentation image of shape (bs, 256, 256) with values between 0 and 1 - bs = seg_batch.shape[0] - all_nearest_points_01_batch = np.zeros((bs, self.query_points.shape[0], 2)) - for ind in range(0, bs): # 0.25 - seg = seg_batch[ind, :, :] - all_nearest_points_01 = self.calculate_bps_points(seg, thr=thr, vis=vis, out_path=out_path) - all_nearest_points_01_batch[ind, :, :] = all_nearest_points_01 - return all_nearest_points_01_batch - - def visualize_result(self, seg, all_nearest_points, out_path=None): - import matplotlib as mpl - mpl.use('Agg') - import matplotlib.pyplot as plt - # img: (256, 256, 3) - img = (np.stack((seg, seg, seg), axis=2) * 155).astype(np.int) - if out_path is None: - ind_img = 0 - out_path = '../test_img' + str(ind_img) + '.png' - fig, ax = plt.subplots() - plt.imshow(img) - plt.gca().set_axis_off() - plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0) - plt.margins(0,0) - ratio_in_out = 1 # 255 - for idx, (y, x) in enumerate(self.query_points): - x = int(x*ratio_in_out) - y = int(y*ratio_in_out) - plt.scatter([x], [y], marker="x", s=50) - x2 = int(all_nearest_points[idx, 1]) - y2 = int(all_nearest_points[idx, 0]) - plt.scatter([x2], [y2], marker="o", s=50) - plt.plot([x, x2], [y, y2]) - plt.savefig(out_path, bbox_inches='tight', pad_inches=0) - plt.close() - return - - - - - -if __name__ == "__main__": - ind_img = 2 # 4 - path_seg_top = '...../pytorch-stacked-hourglass/results/dogs_hg8_ks_24_v1/test/' - path_seg = os.path.join(path_seg_top, 'seg_big_' + str(ind_img) + '.png') - img = np.asarray(Image.open(path_seg)) - # min is 0.004, max is 0.9 - # low values are background, high values are foreground - seg = img[:, :, 1] / 255. 
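                - # seg now holds the green channel as a float mask in [0, 1] (high = foreground, low = background, per the comment above); SegBPS thresholds it at 0.5 below.
                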
- # calculate points - bps = SegBPS() - bps.calculate_bps_points(seg, thr=0.5, vis=False, out_path=None) - - diff --git a/spaces/sagittariusA/media_bias_detection_CS/app.py b/spaces/sagittariusA/media_bias_detection_CS/app.py deleted file mode 100644 index 57601f5aa9d5090fdf46d1ed71471a4596f270f1..0000000000000000000000000000000000000000 --- a/spaces/sagittariusA/media_bias_detection_CS/app.py +++ /dev/null @@ -1,79 +0,0 @@ -import gradio as gr -import torch -import torch.nn.functional as F -import numpy as np -from corpy.morphodita import Tokenizer - -import transformers -from transformers import AutoTokenizer, AutoModelForSequenceClassification - -model_checkpoint = 'ufal/robeczech-base' -device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") -transformers.logging.set_verbosity(transformers.logging.ERROR) - -def classify_sentence(sent:str): - toksentence = tokenizer(sent,truncation=True,return_tensors="pt") - model.eval() - with torch.no_grad(): - toksentence.to(device) - output = model(**toksentence) - - return F.softmax(output.logits,dim=1).argmax(dim=1) - -def classify_text(text:str): - tokenizer_morphodita = Tokenizer("czech") - - all = [] - for sentence in tokenizer_morphodita.tokenize(text, sents=True): - all.append(sentence) - - sentences = np.array([' '.join(x) for x in all]) - annotations = np.array(list(map(classify_sentence,sentences))) - - return annotations - -def classify_text_wrapper(text:str): - result = classify_text(text) - n = len(result) - non_biased = np.where(result==0)[0].shape[0] - biased = np.where(result==1)[0].shape[0] - - return {'Non-biased':non_biased/n,'Biased':biased/n} - - -def interpret_bias(text:str): - result = classify_text(text) - - tokenizer_morphodita = Tokenizer("czech") - - interpretation = [] - all = [] - for sentence in tokenizer_morphodita.tokenize(text, sents=True): - all.append(sentence) - - sentences = np.array([' '.join(x) for x in all]) - - for idx,sentence in enumerate(sentences): - score = 0 - #non biased - if result[idx] == 0: - score = -1 - #biased - if result[idx] == 1: - score = 1 - interpretation.append((sentence, score)) - - return interpretation - -tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) -model = AutoModelForSequenceClassification.from_pretrained("sagittariusA/media_bias_classifier_cs") -model.eval() - -label = gr.outputs.Label(num_top_classes=2) -inputs = gr.inputs.Textbox(placeholder=None, default="", label=None) -app = gr.Interface(fn=classify_text_wrapper,title='Bias classifier',theme='default', - inputs="textbox",layout='unaligned', outputs=label, capture_session=True - ,interpretation=interpret_bias) - -app.launch(inbrowser=True) - diff --git a/spaces/scedlatioru/img-to-music/example/Aerofly Fs Product Key Download !!HOT!!.md b/spaces/scedlatioru/img-to-music/example/Aerofly Fs Product Key Download !!HOT!!.md deleted file mode 100644 index ba1f454856e3d3d5dd7c4ba1380fd71470873541..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Aerofly Fs Product Key Download !!HOT!!.md +++ /dev/null @@ -1,144 +0,0 @@ -
                -

                Aerofly FS Product Key Download: How to Get Started with the Best Flight Simulator

                - -

                If you are looking for a realistic and immersive flight simulator experience, you might want to check out Aerofly FS 4. This is a highly advanced flight simulator that lets you fly a wide range of aircraft with fully animated and interactive 3D cockpits and custom coded systems. You can explore the world of flying with stunning graphics and realistic physics, from airliners and helicopters to fighter jets and gliders.
                

                -

                Aerofly Fs Product Key Download


                Download --->>> https://gohhs.com/2uEzz3



                - -

                But before you can enjoy this amazing flight simulator, you need to download and activate your Aerofly FS product key. In this article, we will show you how to do that in a few simple steps.

                - -

                Where to Buy Aerofly FS Product Key

                - -

                There are two ways to buy Aerofly FS 4: as a DVD boxed version or as a digital download version. The DVD boxed version can be purchased from the Aerosoft Store, while the digital download version can be purchased from Steam or other online platforms.

                - -

                The advantage of buying the DVD boxed version is that you get a physical copy of the game and a printed manual. The advantage of buying the digital download version is that you get instant access to the game and automatic updates.

                - -

                Whichever option you choose, you will receive a product key that you need to activate your game.

                - -

                How to Activate Your Aerofly FS Product Key

                - -

                The activation process depends on which version of the game you bought. If you bought the DVD boxed version from the Aerosoft Store, you need to follow these steps:

                - -
                  -
                1. Insert the DVD into your computer and run the setup program.
                2. -
                3. Follow the instructions on the screen to install the game.
                4. -
                5. When prompted, enter your product key that you received with your purchase.
                6. -
                7. Wait for the activation to complete and start the game.
                8. -
                - -

                If you bought the digital download version from Steam or another platform, you need to follow these steps:

                - -
                  -
                1. Download and install Steam on your computer if you don't have it already.
                2. -
                3. Create a Steam account or log in to your existing one.
                4. -
                5. Click on "Add a Game" at the bottom left corner of the Steam window and select "Activate a Product on Steam".
                6. -
                7. Follow the instructions on the screen and enter your product key that you received with your purchase.
                8. -
                9. Wait for the game to download and install and start it from your Steam library.
                10. -
                - -

                How to Update Your Aerofly FS Product Key

                - -

                If you bought the DVD boxed version of Aerofly FS 4, you need to manually update your game from time to time to get the latest features and bug fixes. To do that, you need to visit the download page on the official website and download the latest update for your game. The size of the download is approximately 2.6 GB. This update is only compatible with the DVD version or direct-download version of Aerofly FS 4 and cannot be used for the Steam version!

                -

                - -

                If you bought the digital download version of Aerofly FS 4 from Steam or another platform, you don't need to worry about updating your game. The updates will happen automatically whenever they are available. You just need to make sure that your Steam client is running and connected to the internet.

                - -

                Conclusion

                - -

                Aerofly FS 4 is a fantastic flight simulator that offers a realistic and immersive flying experience. To enjoy this game, you need to buy, download and activate your Aerofly FS product key. Depending on which version of the game you bought, this process can be different. We hope that this article helped you understand how to get started with Aerofly FS 4 and fly anywhere in the world.

                -

                What are the Features of Aerofly FS 4

                - -

                Aerofly FS 4 is not just a flight simulator, it's a flight simulation platform. It offers a lot of features that make it stand out from other flight simulators on the market. Some of these features are:

                - -
                  -
                • A large fleet of highly detailed aircraft with fully animated and interactive 3D cockpits and custom coded systems.
                • -
                • A realistic flight model that simulates aerodynamics, engine performance, flight controls, landing gear, weather effects and more.
                • -
                • A stunning scenery that covers the entire world with high resolution aerial images, elevation data, 3D buildings, landmarks and airports.
                • -
                • A dynamic weather system that allows you to customize the wind, clouds, visibility, temperature and pressure.
                • -
                • A user-friendly interface that lets you easily select your aircraft, location, time and weather.
                • -
                • A powerful editor that lets you create your own scenery, airports and missions.
                • -
                • A multiplayer mode that lets you fly with other pilots online or on a local network.
                • -
                • A VR mode that lets you experience flying in virtual reality with supported headsets.
                • -
                - -

                Why You Should Try Aerofly FS 4

                - -

                If you are a fan of flight simulation or just curious about flying, you should definitely give Aerofly FS 4 a try. This is a flight simulator that offers a realistic and immersive flying experience suitable for beginner, intermediate and professional flight-sim pilots. You can learn the basics of flying, practice your skills, explore the world or challenge yourself with complex missions. You can also customize your flight to your liking, from choosing your aircraft and location to setting the weather and time. You can even fly with other pilots online or in VR mode.
                

                - -

                Aerofly FS 4 is a flight simulator that will keep you entertained for hours. It's a game that will make you feel like a real pilot. It's a game that will make you love flying.

                - -

                Conclusion

                - -

                Aerofly FS 4 is a highly realistic flight simulator for PC that lets you fly anywhere in the world with a large fleet of highly detailed aircraft. To enjoy this game, you need to download and activate your Aerofly FS product key. You can buy your product key as a DVD boxed version or as a digital download version from various online platforms. The activation process is easy and fast. Once you activate your product key, you can start flying and have fun.
                

                - -

                If you are looking for a realistic and immersive flight simulator experience, you should definitely try Aerofly FS 4. It's a game that will make you feel like a real pilot. It's a game that will make you love flying.

                -

                How to Install Aerofly FS 4

                - -

                Once you have downloaded and activated your Aerofly FS product key, you need to install the game on your PC. The installation process is easy and fast. Here are the steps to follow:

                - -
                  -
                1. Run the setup program that you downloaded or inserted from the DVD.
                2. -
                3. Choose the language and accept the license agreement.
                4. -
                5. Select the destination folder where you want to install the game.
                6. -
                7. Wait for the installation to complete and click on finish.
                8. -
                9. Launch the game from your desktop shortcut or start menu.
                10. -
                - -

                The game will automatically detect your hardware and graphics settings and optimize them for the best performance. You can also adjust them manually from the options menu if you want.

                - -

                How to Play Aerofly FS 4

                - -

                Now that you have installed Aerofly FS 4, you are ready to play and have fun. The game offers a lot of options and modes to suit your preferences and skills. You can choose from:

                - -
                  -
                • A free flight mode that lets you fly anywhere in the world with any aircraft and weather conditions.
                • -
                • A mission mode that lets you complete various tasks and challenges with different aircraft and scenarios.
                • -
                • A tutorial mode that lets you learn the basics of flying and how to operate different aircraft systems.
                • -
                • A multiplayer mode that lets you fly with other pilots online or on a local network.
                • -
                • A VR mode that lets you experience flying in virtual reality with supported headsets.
                • -
                - -

                To start playing, you need to select your aircraft, location, time and weather from the main menu. You can also customize your aircraft settings, such as fuel, weight, payload and more. Then, you can choose your starting position, such as runway, parking or air. You can also set your flight plan and navigation aids if you want.

                - -

                Once you are in the cockpit, you can use your mouse, keyboard, joystick or controller to control your aircraft. You can also interact with the 3D cockpit and use the instruments and switches. You can also use the menu bar at the top of the screen to access various options and features, such as maps, views, cameras, autopilot, ATC and more.

                - -

                Aerofly FS 4 is a very user-friendly flight simulator that lets you enjoy flying without too much hassle or complexity. However, if you want to learn more about the game and how to fly different aircraft, you can visit the official website, wiki pages, forum or contact support for more information and help.

                -

                How to Troubleshoot Aerofly FS 4

                - -

                Aerofly FS 4 is a very stable and reliable flight simulator that runs smoothly on most PC systems. However, if you encounter any problems or issues while playing the game, you can try some of the following troubleshooting tips:

                - -
                  -
                • Make sure your PC meets the minimum system requirements for Aerofly FS 4. You can check them on the official website or on the Steam store page.
                • -
                • Make sure your graphics drivers are up to date. You can download them from the website of your graphics card manufacturer.
                • -
                • Make sure your Windows operating system is up to date. You can check for updates from the Windows settings menu.
                • -
                • Make sure your antivirus software or firewall is not blocking or interfering with Aerofly FS 4. You can try disabling them temporarily or adding Aerofly FS 4 to the exception list.
                • -
                • Make sure you have enough free disk space on your PC. You can check it from the Windows file explorer.
                • -
                • Make sure you have a stable internet connection if you want to play online or download updates.
                • -
                • If you have any mods or custom content installed for Aerofly FS 4, make sure they are compatible with the latest version of the game. You can try disabling them or removing them if they cause any problems.
                • -
                - -

                If none of these tips help, you can contact support for more assistance. You can use the contact form on the official website or post your issue on the forum. You can also check the wiki pages or the FAQ section for more information and solutions.

                - -

                How to Get More Out of Aerofly FS 4

                - -

                Aerofly FS 4 is a flight simulator that offers a lot of content and features out of the box. However, if you want to get more out of the game, you can also explore some of the following options:

                - -
                  -
                • You can download and install additional aircraft, scenery, airports and missions from various online sources. Some of them are free and some of them are paid. You can find them on websites such as Aerosoft, SimMarket, FlightSim.com, AVSIM and more.
                • -
                • You can create your own content for Aerofly FS 4 using the editor tool that comes with the game. You can design your own scenery, airports and missions and share them with other users.
                • -
                • You can join a virtual airline or a flight club and fly with other pilots who share your passion for flying. You can find them on websites such as VATSIM, IVAO, FSCloud and more.
                • -
                • You can watch tutorials and videos on YouTube or other platforms that teach you how to fly different aircraft, how to use different systems, how to perform different maneuvers and more.
                • -
                • You can read books and magazines about aviation and flight simulation that give you more knowledge and insight into the world of flying.
                • -
                - -

                Aerofly FS 4 is a flight simulator that offers endless possibilities and opportunities for learning and fun. You can always find something new and exciting to do in the game.

                -

                Conclusion

                - -

                Aerofly FS 4 is a highly realistic and immersive flight simulator that lets you fly anywhere in the world with a large fleet of highly detailed aircraft. It's a game that will make you feel like a real pilot and make you love flying. To enjoy this game, you need to download and activate your Aerofly FS product key. You can buy your product key as a DVD boxed version or as a digital download version from various online platforms. The activation and installation process is easy and fast. Once you activate and install your product key, you can start flying and have fun.

                - -

                If you encounter any problems or issues while playing the game, you can try some of the troubleshooting tips that we provided in this article. You can also contact support for more assistance. If you want to get more out of the game, you can also explore some of the options that we suggested in this article. You can download and install additional content, create your own content, join a virtual airline or a flight club, watch tutorials and videos, read books and magazines and more.

                - -

                Aerofly FS 4 is a flight simulator that offers a lot of content and features out of the box. However, it also offers a lot of possibilities and opportunities for learning and fun. You can always find something new and exciting to do in the game.

                - -

                We hope that this article helped you understand how to get started with Aerofly FS 4 and fly anywhere in the world. If you have any questions or comments, please feel free to leave them below. Thank you for reading and happy flying!

                
                -
                -
                \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Gwen Guthrie - Collection 1982-86 FLAC MP3.md b/spaces/scedlatioru/img-to-music/example/Gwen Guthrie - Collection 1982-86 FLAC MP3.md deleted file mode 100644 index 8a0020072fcb2af421a1f95546d8747616c86de1..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Gwen Guthrie - Collection 1982-86 FLAC MP3.md +++ /dev/null @@ -1,6 +0,0 @@ -

                Gwen Guthrie - Collection 1982-86 FLAC MP3


                Download File ⚹⚹⚹ https://gohhs.com/2uEArP



                -
                -Johnny Winter - Roots (2011) FLAC/MP3 · Gwen Guthrie - Collection 1982-86 FLAC/MP3 · malajuven private lesson · Microsoft Office 2007 Prof ...
                
                -
                -
                -

                diff --git a/spaces/scedlatioru/img-to-music/example/Tajima.dgml.v.11.0.5.2633.tajima.xi.crack ((HOT)).included.iso.md b/spaces/scedlatioru/img-to-music/example/Tajima.dgml.v.11.0.5.2633.tajima.xi.crack ((HOT)).included.iso.md deleted file mode 100644 index 2b94c6c2a7bca0d77e452256b4b73514326e0936..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Tajima.dgml.v.11.0.5.2633.tajima.xi.crack ((HOT)).included.iso.md +++ /dev/null @@ -1,6 +0,0 @@ -

                Tajima.dgml.v.11.0.5.2633.tajima.xi.crack.included.iso


                DOWNLOAD: https://gohhs.com/2uEAyu
                



                -
                -AUTOMATION STUDIO V 5.0 hydraulic and pneumatic simulation software ... Tajima DG/ML by Pulse Edition Xi (Pulse/Tajima edition 11.0.5.2633), complete 8-language computer embroidery design software: Tajima; Wilcom ES 2006, Tianmu, Shan ... Fluent fluid-engineering simulation worked examples and applications companion disc - ISO 1CD ... SystemView 5.0 + training materials + adaptive filter library (crack ok!)
                
                -
                -
                -

                diff --git a/spaces/scedlatioru/img-to-music/example/Train To Busan 2 Movie Hd 720p Download.md b/spaces/scedlatioru/img-to-music/example/Train To Busan 2 Movie Hd 720p Download.md deleted file mode 100644 index 54fa40f3caab6a753483cdbb07de92d59d4e667a..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Train To Busan 2 Movie Hd 720p Download.md +++ /dev/null @@ -1,108 +0,0 @@ - -

                Train To Busan 2 Movie Hd 720p Download: A Guide

                -

                If you are a fan of zombie movies, you might have watched Train to Busan, a 2016 South Korean film that became a global hit. The movie follows a group of passengers on a train from Seoul to Busan, who have to survive a zombie outbreak that spreads across the country. The movie was praised for its thrilling action, emotional drama, and social commentary.

                -

                But did you know that there is a sequel to Train to Busan? Yes, Train to Busan 2: Peninsula, also known as Bando or Peninsula, is a 2020 South Korean film that is set four years after the events of Train to Busan. The movie follows a former soldier who returns to the Korean peninsula, which has become a wasteland infested by zombies and rogue militias. He and his team have to complete a mission and escape from the peninsula alive.

                -

                Train To Busan 2 Movie Hd 720p Download


                Download File ✔✔✔ https://gohhs.com/2uEAxV



                -

                If you are interested in watching Train to Busan 2: Peninsula, you might be wondering how to download it in HD 720p quality. In this article, we will guide you on how to download Train to Busan 2 movie HD 720p from various sources. We will also review the movie and its features, pros and cons, and alternatives.

                - -

                How to Download Train To Busan 2 Movie HD 720p from Netnaija?

                -

                One of the sources where you can download Train to Busan 2 movie HD 720p is Netnaija, a website that offers free downloads of movies, music, TV shows, and more. Here are the steps to download Train to Busan 2 movie HD 720p from Netnaija:

                - -
                  -
                1. Go to https://netnaijar.com/train-to-busan-2-peninsula-full-movie-download/
                2. -
                3. Scroll down and click on the green button that says "Download"
                4. -
                5. Select the quality you want to download, such as HD 720p
                6. -
                7. Wait for the download link to be generated
                8. -
                9. Click on the download link and save the file on your device
                10. -
                11. Enjoy watching Train to Busan 2 movie HD 720p
                12. -
                - -

                How to Download Train To Busan 2 Movie HD 720p from Archive.org?

                -

                Another source where you can download Train to Busan 2 movie HD 720p is Archive.org, a website that provides free access to digital content such as books, movies, music, and more. Here are the steps to download Train to Busan 2 movie HD 720p from Archive.org:

                - -
                  -
                1. Go to https://archive.org/details/train-to-busan-2-peninsula-2020-720p-hevc-un-cut-blu-ray-hindi-english-vegamovies.-nl
                2. -
                3. Click on the blue button that says "DOWNLOAD OPTIONS"
                4. -
                5. Select the format you want to download, such as MP4 or MKV
                6. -
                7. Click on the file name and save the file on your device
                8. -
                9. Enjoy watching Train to Busan 2 movie HD 720p
                10. -
                - -

                How to Download Train To Busan 2 Movie HD 720p from KatMovieHD.se?

                -

                A third source where you can download Train to Busan 2 movie HD 720p is KatMovieHD.se, a website that offers free downloads of movies and TV shows in various languages and genres. Here are the steps to download Train to Busan 2 movie HD 720p from KatMovieHD.se:

                - -
                  -
                1. Go to https://katmoviehd.g3g.ink/train-to-busan-2-peninsula-hindi/
                2. -
                3. Scroll down and click on the red button that says "Download Links"
                4. -
                5. Select the server you want to download from, such as Google Drive or Mega
                6. -
                7. Follow the instructions on the screen to get the download link
                8. -
                9. Click on the download link and save the file on your device
                10. -
                11. Enjoy watching Train to Busan 2 movie HD 720p
                12. -
                - -

                What are the Features of Train To Busan 2 Movie HD 720p?

                -

                If you download Train to Busan 2 movie HD 720p from any of the sources above, you can enjoy some of its features, such as:

                - -
                  -
                • High-definition video quality that enhances your viewing experience
                • -
                • Dual audio option that lets you choose between Hindi and Korean languages
                • -
                • English subtitles that help you understand the dialogue better
                • -
                • Action-packed scenes that keep you on the edge of your seat
                • -
                • Zombie horror genre that gives you chills and thrills
                • -
                • Social commentary that reflects on the current issues of humanity
                • -
                - -

                What are the Pros and Cons of Train To Busan 2 Movie HD 720p?

                -

                As with any movie, Train to Busan 2 movie HD 720p has its pros and cons. Here are some of them:

                -

                - -

                Pros:

                -
                  -
                • You can watch Train to Busan 2 movie HD 720p for free without paying for a subscription or a ticket
                • -
                • You can watch Train to Busan 2 movie HD 720p at your own convenience and comfort
                • -
                • You can watch Train to Busan 2 movie HD 720p with your friends and family without any restrictions
                • -
                • You can watch Train to Busan 2 movie HD 720p as many times as you want without any limits
                • -
                • You can watch Train to Busan 2 movie HD 720p with high-definition video quality and dual audio option
                • -
                - -

                Cons:

                -
                  -
                • You are violating the terms and conditions of the movie producers and distributors by downloading Train to Busan 2 movie HD 720p without their permission
                • -
                • You are risking your device's security and performance by downloading Train to Busan 2 movie HD 720p from unknown or untrusted sources
                • -
                • You are not supporting the movie makers who work hard to create and deliver Train to Busan 2 movie HD 720p by downloading it for free
                • -
                • You are not getting the full cinematic experience of watching Train to Busan 2 movie HD 720p on a big screen with surround sound
                • -
                You are not getting the latest updates or bug fixes of Train to Busan 2 movie HD 720p after you download it
                

                  -

                  What are the Alternatives to Train To Busan 2 Movie HD 720p Download?

                  -

                  While Train to Busan 2 movie HD 720p download might seem tempting, it is not the only option for watching Train to Busan 2: Peninsula. There are other alternatives that are legal, safe, and affordable. Here are some of them:

                  - -
                    -
                  • Watch Train to Busan 2: Peninsula on a streaming platform. There are many streaming platforms that offer Train to Busan 2: Peninsula for online viewing, such as Netflix, Amazon Prime Video, Hulu, and more. You can watch Train to Busan 2: Peninsula with high-quality video and audio, subtitles and dubbing options, and interactive features. You can also watch other movies and shows on these platforms. However, you need to pay for a subscription or a rental fee to access these platforms.
                  • -
                  • Watch Train to Busan 2: Peninsula on a DVD or Blu-ray. There are many DVD or Blu-ray discs that offer Train to Busan 2: Peninsula for offline viewing, such as Amazon, Walmart, Best Buy, and more. You can watch Train to Busan 2: Peninsula with high-quality video and audio, subtitles and dubbing options, and bonus features. You can also watch other movies and shows on these discs. However, you need to buy or rent the disc and have a compatible player to watch it.
                  • -
                  • Watch Train to Busan 2: Peninsula in a theater. There are many theaters that offer Train to Busan 2: Peninsula for cinematic viewing, such as AMC, Regal, Cinemark, and more. You can watch Train to Busan 2: Peninsula with high-quality video and audio, subtitles and dubbing options, and immersive atmosphere. You can also watch other movies and shows in these theaters. However, you need to buy a ticket and follow the theater's rules and regulations to watch it.
                  • -
                  - -

                  How to Use Train To Busan 2 Movie HD 720p Download?

                  -

                  If you have downloaded Train to Busan 2 movie HD 720p from any of the sources above, you might be wondering how to use it. Here are some basic steps to get you started:

                  - -
                    -
                  1. Locate the downloaded file on your device.
                  2. -
                  3. Extract the file using WinRAR or 7-Zip if it is compressed.
                  4. -
                  5. Run the file using a media player that supports MP4 or MKV formats, such as VLC or MPC-HC.
                  6. -
                  7. Select the language you want to watch in, either Hindi or Korean.
                  8. -
                  9. Select the subtitle language you want to read in, either English or none.
                  10. -
                  11. Enjoy watching Train to Busan 2 movie HD 720p
                  12. -
                  - -

                  Conclusion

                  - -

                  Train To Busan 2 movie HD 720p download is a way of watching Train to Busan 2: Peninsula, a 2020 South Korean zombie horror film that is a sequel to Train to Busan. It has many features and benefits, but it also has many risks and drawbacks. You can download it from various sources using different methods, but you should be aware of the legal and ethical implications of doing so. If you want to watch Train to Busan 2: Peninsula without any problems or worries, we suggest you use one of the alternatives we mentioned above.

                

                

                
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/upfirdn2d.py b/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/upfirdn2d.py deleted file mode 100644 index 667f96e1ded35d48f163f37e21d1ed8ff191aac3..0000000000000000000000000000000000000000 --- a/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/upfirdn2d/upfirdn2d.py +++ /dev/null @@ -1,186 +0,0 @@ -# modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501 - -import torch -from torch.autograd import Function -from torch.nn import functional as F - -try: - from . import upfirdn2d_ext -except ImportError: - import os - BASICSR_JIT = os.getenv('BASICSR_JIT') - if BASICSR_JIT == 'True': - from torch.utils.cpp_extension import load - module_path = os.path.dirname(__file__) - upfirdn2d_ext = load( - 'upfirdn2d', - sources=[ - os.path.join(module_path, 'src', 'upfirdn2d.cpp'), - os.path.join(module_path, 'src', 'upfirdn2d_kernel.cu'), - ], - ) - - -class UpFirDn2dBackward(Function): - - @staticmethod - def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size): - - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_ext.upfirdn2d( - grad_output, - grad_kernel, - down_x, - down_y, - up_x, - up_y, - g_pad_x0, - g_pad_x1, - g_pad_y0, - g_pad_y1, - ) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_ext.upfirdn2d( - gradgrad_input, - kernel, - ctx.up_x, - ctx.up_y, - ctx.down_x, - ctx.down_y, - ctx.pad_x0, - ctx.pad_x1, - ctx.pad_y0, - ctx.pad_y1, - ) - # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], - # ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_ext.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) - 
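                - # The extension works on the flattened (batch*channel, in_h, in_w, 1) view created above; the view() below restores the NCHW layout.
                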
# out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - if input.device.type == 'cpu': - out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) - else: - out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) - - return out - - -def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) - out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/models/__init__.py b/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/models/__init__.py deleted file mode 100644 index e3413961d1d184b99835eb1e919b052d70298bc6..0000000000000000000000000000000000000000 --- a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/models/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .GroundingDINO import build_groundingdino - - -def build_model(args): - # we use register to maintain models from catdet6 on. 
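                - # Dispatch through the registry: fetch the build function registered under args.modelname instead of hard-coding a single constructor.
                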
- from .registry import MODULE_BUILD_FUNCS - - assert args.modelname in MODULE_BUILD_FUNCS._module_dict - build_func = MODULE_BUILD_FUNCS.get(args.modelname) - model = build_func(args) - return model diff --git a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/pidinet/__init__.py b/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/pidinet/__init__.py deleted file mode 100644 index 6d427b0688664fe76f4321318ea99b374d58c64f..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/controlnet_annotator/pidinet/__init__.py +++ /dev/null @@ -1,101 +0,0 @@ -import os -import torch -import numpy as np -from einops import rearrange -from .model import pidinet - -models_path = 'pretrained/controlnet/preprocess' - -netNetwork = None -remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/table5_pidinet.pth" -modeldir = os.path.join(models_path, "pidinet") -old_modeldir = os.path.dirname(os.path.realpath(__file__)) - -def safe_step(x, step=2): - y = x.astype(np.float32) * float(step + 1) - y = y.astype(np.int32).astype(np.float32) / float(step) - return y - -def load_file_from_url(url, model_dir=None, progress=True, file_name=None): - """Load file form http url, will download models if necessary. - - Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py - - Args: - url (str): URL to be downloaded. - model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir. - Default: None. - progress (bool): Whether to show the download progress. Default: True. - file_name (str): The downloaded file name. If None, use the file name in the url. Default: None. - - Returns: - str: The path to the downloaded file. 
- """ - from torch.hub import download_url_to_file, get_dir - from urllib.parse import urlparse - if model_dir is None: # use the pytorch hub_dir - hub_dir = get_dir() - model_dir = os.path.join(hub_dir, 'checkpoints') - - os.makedirs(model_dir, exist_ok=True) - - parts = urlparse(url) - filename = os.path.basename(parts.path) - if file_name is not None: - filename = file_name - cached_file = os.path.abspath(os.path.join(model_dir, filename)) - if not os.path.exists(cached_file): - print(f'Downloading: "{url}" to {cached_file}\n') - download_url_to_file(url, cached_file, hash_prefix=None, progress=progress) - return cached_file - -def load_state_dict(ckpt_path, location='cpu'): - def get_state_dict(d): - return d.get('state_dict', d) - - _, extension = os.path.splitext(ckpt_path) - if extension.lower() == ".safetensors": - import safetensors.torch - state_dict = safetensors.torch.load_file(ckpt_path, device=location) - else: - state_dict = get_state_dict(torch.load( - ckpt_path, map_location=torch.device(location))) - state_dict = get_state_dict(state_dict) - print(f'Loaded state_dict from [{ckpt_path}]') - return state_dict - -def apply_pidinet(input_image, is_safe=False, apply_fliter=False, device='cpu'): - global netNetwork - if netNetwork is None: - modelpath = os.path.join(modeldir, "table5_pidinet.pth") - old_modelpath = os.path.join(old_modeldir, "table5_pidinet.pth") - if os.path.exists(old_modelpath): - modelpath = old_modelpath - elif not os.path.exists(modelpath): - load_file_from_url(remote_model_path, model_dir=modeldir) - netNetwork = pidinet() - ckp = load_state_dict(modelpath) - netNetwork.load_state_dict({k.replace('module.',''):v for k, v in ckp.items()}) - - netNetwork = netNetwork.to(device) - netNetwork.eval() - assert input_image.ndim == 3 - input_image = input_image[:, :, ::-1].copy() - with torch.no_grad(): - image_pidi = torch.from_numpy(input_image).float().to(device) - image_pidi = image_pidi / 255.0 - image_pidi = rearrange(image_pidi, 'h w c -> 1 c h w') - edge = netNetwork(image_pidi)[-1] - edge = edge.cpu().numpy() - if apply_fliter: - edge = edge > 0.5 - if is_safe: - edge = safe_step(edge) - edge = (edge * 255.0).clip(0, 255).astype(np.uint8) - - return edge[0][0] - -def unload_pid_model(): - global netNetwork - if netNetwork is not None: - netNetwork.cpu() \ No newline at end of file diff --git a/spaces/sidharthism/fashion-eye-try-on-demo/README.md b/spaces/sidharthism/fashion-eye-try-on-demo/README.md deleted file mode 100644 index 775aa7059c0a2a3bfde50195c4bccc3c960aa1e1..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye-try-on-demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fashion Eye Try On Demo -emoji: 🏢 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.1.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sidharthism/fashion-eye/models/stylegan2/stylegan2-pytorch/ppl.py b/spaces/sidharthism/fashion-eye/models/stylegan2/stylegan2-pytorch/ppl.py deleted file mode 100644 index 6b185c894ba719701baa6ac348e743a003ec5f27..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/models/stylegan2/stylegan2-pytorch/ppl.py +++ /dev/null @@ -1,104 +0,0 @@ -import argparse - -import torch -from torch.nn import functional as F -import numpy as np -from tqdm import tqdm - -import lpips -from model import Generator - - -def normalize(x): - return x / torch.sqrt(x.pow(2).sum(-1, 
keepdim=True)) - - -def slerp(a, b, t): - a = normalize(a) - b = normalize(b) - d = (a * b).sum(-1, keepdim=True) - p = t * torch.acos(d) - c = normalize(b - d * a) - d = a * torch.cos(p) + c * torch.sin(p) - - return normalize(d) - - -def lerp(a, b, t): - return a + (b - a) * t - - -if __name__ == '__main__': - device = 'cuda' - - parser = argparse.ArgumentParser() - - parser.add_argument('--space', choices=['z', 'w']) - parser.add_argument('--batch', type=int, default=64) - parser.add_argument('--n_sample', type=int, default=5000) - parser.add_argument('--size', type=int, default=256) - parser.add_argument('--eps', type=float, default=1e-4) - parser.add_argument('--crop', action='store_true') - parser.add_argument('ckpt', metavar='CHECKPOINT') - - args = parser.parse_args() - - latent_dim = 512 - - ckpt = torch.load(args.ckpt) - - g = Generator(args.size, latent_dim, 8).to(device) - g.load_state_dict(ckpt['g_ema']) - g.eval() - - percept = lpips.PerceptualLoss( - model='net-lin', net='vgg', use_gpu=device.startswith('cuda') - ) - - distances = [] - - n_batch = args.n_sample // args.batch - resid = args.n_sample - (n_batch * args.batch) - batch_sizes = [args.batch] * n_batch + [resid] - - with torch.no_grad(): - for batch in tqdm(batch_sizes): - noise = g.make_noise() - - inputs = torch.randn([batch * 2, latent_dim], device=device) - lerp_t = torch.rand(batch, device=device) - - if args.space == 'w': - latent = g.get_latent(inputs) - latent_t0, latent_t1 = latent[::2], latent[1::2] - latent_e0 = lerp(latent_t0, latent_t1, lerp_t[:, None]) - latent_e1 = lerp(latent_t0, latent_t1, lerp_t[:, None] + args.eps) - latent_e = torch.stack([latent_e0, latent_e1], 1).view(*latent.shape) - - image, _ = g([latent_e], input_is_latent=True, noise=noise) - - if args.crop: - c = image.shape[2] // 8 - image = image[:, :, c * 3 : c * 7, c * 2 : c * 6] - - factor = image.shape[2] // 256 - - if factor > 1: - image = F.interpolate( - image, size=(256, 256), mode='bilinear', align_corners=False - ) - - dist = percept(image[::2], image[1::2]).view(image.shape[0] // 2) / ( - args.eps ** 2 - ) - distances.append(dist.to('cpu').numpy()) - - distances = np.concatenate(distances, 0) - - lo = np.percentile(distances, 1, interpolation='lower') - hi = np.percentile(distances, 99, interpolation='higher') - filtered_dist = np.extract( - np.logical_and(lo <= distances, distances <= hi), distances - ) - - print('ppl:', filtered_dist.mean()) diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/archs/vqgan_arch.py b/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/archs/vqgan_arch.py deleted file mode 100644 index f6dfcf4c9983b431f0a978701e5ddd9598faf381..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/archs/vqgan_arch.py +++ /dev/null @@ -1,435 +0,0 @@ -''' -VQGAN code, adapted from the original created by the Unleashing Transformers authors: -https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py - -''' -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import copy -from basicsr.utils import get_root_logger -from basicsr.utils.registry import ARCH_REGISTRY - -def normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -@torch.jit.script -def swish(x): - return x*torch.sigmoid(x) - - -# Define VQVAE classes -class VectorQuantizer(nn.Module): - def __init__(self, codebook_size, emb_dim, beta): - super(VectorQuantizer, self).__init__() - self.codebook_size 
= codebook_size # number of embeddings - self.emb_dim = emb_dim # dimension of embedding - self.beta = beta # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2 - self.embedding = nn.Embedding(self.codebook_size, self.emb_dim) - self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size) - - def forward(self, z): - # reshape z -> (batch, height, width, channel) and flatten - z = z.permute(0, 2, 3, 1).contiguous() - z_flattened = z.view(-1, self.emb_dim) - - # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z - d = (z_flattened ** 2).sum(dim=1, keepdim=True) + (self.embedding.weight**2).sum(1) - \ - 2 * torch.matmul(z_flattened, self.embedding.weight.t()) - - mean_distance = torch.mean(d) - # find closest encodings - # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1) - min_encoding_scores, min_encoding_indices = torch.topk(d, 1, dim=1, largest=False) - # [0-1], higher score, higher confidence - min_encoding_scores = torch.exp(-min_encoding_scores/10) - - min_encodings = torch.zeros(min_encoding_indices.shape[0], self.codebook_size).to(z) - min_encodings.scatter_(1, min_encoding_indices, 1) - - # get quantized latent vectors - z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape) - # compute loss for embedding - loss = torch.mean((z_q.detach()-z)**2) + self.beta * torch.mean((z_q - z.detach()) ** 2) - # preserve gradients - z_q = z + (z_q - z).detach() - - # perplexity - e_mean = torch.mean(min_encodings, dim=0) - perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10))) - # reshape back to match original input shape - z_q = z_q.permute(0, 3, 1, 2).contiguous() - - return z_q, loss, { - "perplexity": perplexity, - "min_encodings": min_encodings, - "min_encoding_indices": min_encoding_indices, - "min_encoding_scores": min_encoding_scores, - "mean_distance": mean_distance - } - - def get_codebook_feat(self, indices, shape): - # input indices: batch*token_num -> (batch*token_num)*1 - # shape: batch, height, width, channel - indices = indices.view(-1,1) - min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices) - min_encodings.scatter_(1, indices, 1) - # get quantized latent vectors - z_q = torch.matmul(min_encodings.float(), self.embedding.weight) - - if shape is not None: # reshape back to match original input shape - z_q = z_q.view(shape).permute(0, 3, 1, 2).contiguous() - - return z_q - - -class GumbelQuantizer(nn.Module): - def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=5e-4, temp_init=1.0): - super().__init__() - self.codebook_size = codebook_size # number of embeddings - self.emb_dim = emb_dim # dimension of embedding - self.straight_through = straight_through - self.temperature = temp_init - self.kl_weight = kl_weight - self.proj = nn.Conv2d(num_hiddens, codebook_size, 1) # projects last encoder layer to quantized logits - self.embed = nn.Embedding(codebook_size, emb_dim) - - def forward(self, z): - hard = self.straight_through if self.training else True - - logits = self.proj(z) - - soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard) - - z_q = torch.einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight) - - # + kl divergence to the prior loss - qy = F.softmax(logits, dim=1) - diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean() - min_encoding_indices = soft_one_hot.argmax(dim=1) - - return z_q, diff, { - "min_encoding_indices": 
min_encoding_indices - } - - -class Downsample(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0) - - def forward(self, x): - pad = (0, 1, 0, 1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - return x - - -class Upsample(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) - - def forward(self, x): - x = F.interpolate(x, scale_factor=2.0, mode="nearest") - x = self.conv(x) - - return x - - -class ResBlock(nn.Module): - def __init__(self, in_channels, out_channels=None): - super(ResBlock, self).__init__() - self.in_channels = in_channels - self.out_channels = in_channels if out_channels is None else out_channels - self.norm1 = normalize(in_channels) - self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - self.norm2 = normalize(out_channels) - self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) - if self.in_channels != self.out_channels: - self.conv_out = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0) - - def forward(self, x_in): - x = x_in - x = self.norm1(x) - x = swish(x) - x = self.conv1(x) - x = self.norm2(x) - x = swish(x) - x = self.conv2(x) - if self.in_channels != self.out_channels: - x_in = self.conv_out(x_in) - - return x + x_in - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = normalize(in_channels) - self.q = torch.nn.Conv2d( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0 - ) - self.k = torch.nn.Conv2d( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0 - ) - self.v = torch.nn.Conv2d( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0 - ) - self.proj_out = torch.nn.Conv2d( - in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0 - ) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b, c, h, w = q.shape - q = q.reshape(b, c, h*w) - q = q.permute(0, 2, 1) - k = k.reshape(b, c, h*w) - w_ = torch.bmm(q, k) - w_ = w_ * (int(c)**(-0.5)) - w_ = F.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b, c, h*w) - w_ = w_.permute(0, 2, 1) - h_ = torch.bmm(v, w_) - h_ = h_.reshape(b, c, h, w) - - h_ = self.proj_out(h_) - - return x+h_ - - -class Encoder(nn.Module): - def __init__(self, in_channels, nf, emb_dim, ch_mult, num_res_blocks, resolution, attn_resolutions): - super().__init__() - self.nf = nf - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.attn_resolutions = attn_resolutions - - curr_res = self.resolution - in_ch_mult = (1,)+tuple(ch_mult) - - blocks = [] - # initial convultion - blocks.append(nn.Conv2d(in_channels, nf, kernel_size=3, stride=1, padding=1)) - - # residual and downsampling blocks, with attention on smaller res (16x16) - for i in range(self.num_resolutions): - block_in_ch = nf * in_ch_mult[i] - block_out_ch = nf * ch_mult[i] - for _ in range(self.num_res_blocks): - blocks.append(ResBlock(block_in_ch, block_out_ch)) - block_in_ch = block_out_ch - if curr_res in attn_resolutions: - blocks.append(AttnBlock(block_in_ch)) - - if i != self.num_resolutions - 1: - blocks.append(Downsample(block_in_ch)) - curr_res = curr_res 
// 2 - - # non-local attention block - blocks.append(ResBlock(block_in_ch, block_in_ch)) - blocks.append(AttnBlock(block_in_ch)) - blocks.append(ResBlock(block_in_ch, block_in_ch)) - - # normalise and convert to latent size - blocks.append(normalize(block_in_ch)) - blocks.append(nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1)) - self.blocks = nn.ModuleList(blocks) - - def forward(self, x): - for block in self.blocks: - x = block(x) - - return x - - -class Generator(nn.Module): - def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions): - super().__init__() - self.nf = nf - self.ch_mult = ch_mult - self.num_resolutions = len(self.ch_mult) - self.num_res_blocks = res_blocks - self.resolution = img_size - self.attn_resolutions = attn_resolutions - self.in_channels = emb_dim - self.out_channels = 3 - block_in_ch = self.nf * self.ch_mult[-1] - curr_res = self.resolution // 2 ** (self.num_resolutions-1) - - blocks = [] - # initial conv - blocks.append(nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1)) - - # non-local attention block - blocks.append(ResBlock(block_in_ch, block_in_ch)) - blocks.append(AttnBlock(block_in_ch)) - blocks.append(ResBlock(block_in_ch, block_in_ch)) - - for i in reversed(range(self.num_resolutions)): - block_out_ch = self.nf * self.ch_mult[i] - - for _ in range(self.num_res_blocks): - blocks.append(ResBlock(block_in_ch, block_out_ch)) - block_in_ch = block_out_ch - - if curr_res in self.attn_resolutions: - blocks.append(AttnBlock(block_in_ch)) - - if i != 0: - blocks.append(Upsample(block_in_ch)) - curr_res = curr_res * 2 - - blocks.append(normalize(block_in_ch)) - blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1)) - - self.blocks = nn.ModuleList(blocks) - - - def forward(self, x): - for block in self.blocks: - x = block(x) - - return x - - -@ARCH_REGISTRY.register() -class VQAutoEncoder(nn.Module): - def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=[16], codebook_size=1024, emb_dim=256, - beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None): - super().__init__() - logger = get_root_logger() - self.in_channels = 3 - self.nf = nf - self.n_blocks = res_blocks - self.codebook_size = codebook_size - self.embed_dim = emb_dim - self.ch_mult = ch_mult - self.resolution = img_size - self.attn_resolutions = attn_resolutions - self.quantizer_type = quantizer - self.encoder = Encoder( - self.in_channels, - self.nf, - self.embed_dim, - self.ch_mult, - self.n_blocks, - self.resolution, - self.attn_resolutions - ) - if self.quantizer_type == "nearest": - self.beta = beta #0.25 - self.quantize = VectorQuantizer(self.codebook_size, self.embed_dim, self.beta) - elif self.quantizer_type == "gumbel": - self.gumbel_num_hiddens = emb_dim - self.straight_through = gumbel_straight_through - self.kl_weight = gumbel_kl_weight - self.quantize = GumbelQuantizer( - self.codebook_size, - self.embed_dim, - self.gumbel_num_hiddens, - self.straight_through, - self.kl_weight - ) - self.generator = Generator( - self.nf, - self.embed_dim, - self.ch_mult, - self.n_blocks, - self.resolution, - self.attn_resolutions - ) - - if model_path is not None: - chkpt = torch.load(model_path, map_location='cpu') - if 'params_ema' in chkpt: - self.load_state_dict(torch.load(model_path, map_location='cpu')['params_ema']) - logger.info(f'vqgan is loaded from: {model_path} [params_ema]') - elif 'params' in chkpt: - 
self.load_state_dict(torch.load(model_path, map_location='cpu')['params']) - logger.info(f'vqgan is loaded from: {model_path} [params]') - else: - raise ValueError(f'Wrong params!') - - - def forward(self, x): - x = self.encoder(x) - quant, codebook_loss, quant_stats = self.quantize(x) - x = self.generator(quant) - return x, codebook_loss, quant_stats - - - -# patch based discriminator -@ARCH_REGISTRY.register() -class VQGANDiscriminator(nn.Module): - def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None): - super().__init__() - - layers = [nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)] - ndf_mult = 1 - ndf_mult_prev = 1 - for n in range(1, n_layers): # gradually increase the number of filters - ndf_mult_prev = ndf_mult - ndf_mult = min(2 ** n, 8) - layers += [ - nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=2, padding=1, bias=False), - nn.BatchNorm2d(ndf * ndf_mult), - nn.LeakyReLU(0.2, True) - ] - - ndf_mult_prev = ndf_mult - ndf_mult = min(2 ** n_layers, 8) - - layers += [ - nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=1, padding=1, bias=False), - nn.BatchNorm2d(ndf * ndf_mult), - nn.LeakyReLU(0.2, True) - ] - - layers += [ - nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1)] # output 1 channel prediction map - self.main = nn.Sequential(*layers) - - if model_path is not None: - chkpt = torch.load(model_path, map_location='cpu') - if 'params_d' in chkpt: - self.load_state_dict(torch.load(model_path, map_location='cpu')['params_d']) - elif 'params' in chkpt: - self.load_state_dict(torch.load(model_path, map_location='cpu')['params']) - else: - raise ValueError(f'Wrong params!') - - def forward(self, x): - return self.main(x) \ No newline at end of file diff --git a/spaces/solara-dev/wanderlust/wanderlust.py b/spaces/solara-dev/wanderlust/wanderlust.py deleted file mode 100644 index f86d484635dea80be454d306360560b01195bad8..0000000000000000000000000000000000000000 --- a/spaces/solara-dev/wanderlust/wanderlust.py +++ /dev/null @@ -1,337 +0,0 @@ -import json -import os -import time -from pathlib import Path - -import ipyleaflet -from openai import NotFoundError, OpenAI -from openai.types.beta import Thread - -import solara - -HERE = Path(__file__).parent - -center_default = (0, 0) -zoom_default = 2 - -messages = solara.reactive([]) -zoom_level = solara.reactive(zoom_default) -center = solara.reactive(center_default) -markers = solara.reactive([]) - -url = ipyleaflet.basemaps.OpenStreetMap.Mapnik.build_url() -openai = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) -model = "gpt-4-1106-preview" -app_style = (HERE / "style.css").read_text() - - -# Declare tools for openai assistant to use -tools = [ - { - "type": "function", - "function": { - "name": "update_map", - "description": "Update map to center on a particular location", - "parameters": { - "type": "object", - "properties": { - "longitude": { - "type": "number", - "description": "Longitude of the location to center the map on", - }, - "latitude": { - "type": "number", - "description": "Latitude of the location to center the map on", - }, - "zoom": { - "type": "integer", - "description": "Zoom level of the map", - }, - }, - "required": ["longitude", "latitude", "zoom"], - }, - }, - }, - { - "type": "function", - "function": { - "name": "add_marker", - "description": "Add marker to the map", - "parameters": { - "type": "object", - "properties": { - "longitude": { - "type": "number", - "description": "Longitude of the location to the 
marker", - }, - "latitude": { - "type": "number", - "description": "Latitude of the location to the marker", - }, - "label": { - "type": "string", - "description": "Text to display on the marker", - }, - }, - "required": ["longitude", "latitude", "label"], - }, - }, - }, -] - - -def update_map(longitude, latitude, zoom): - center.set((latitude, longitude)) - zoom_level.set(zoom) - return "Map updated" - - -def add_marker(longitude, latitude, label): - markers.set(markers.value + [{"location": (latitude, longitude), "label": label}]) - return "Marker added" - - -functions = { - "update_map": update_map, - "add_marker": add_marker, -} - - -def assistant_tool_call(tool_call): - # actually executes the tool call the OpenAI assistant wants to perform - function = tool_call.function - name = function.name - arguments = json.loads(function.arguments) - return_value = functions[name](**arguments) - tool_outputs = { - "tool_call_id": tool_call.id, - "output": return_value, - } - return tool_outputs - - -@solara.component -def Map(): - ipyleaflet.Map.element( # type: ignore - zoom=zoom_level.value, - center=center.value, - scroll_wheel_zoom=True, - layers=[ - ipyleaflet.TileLayer.element(url=url), - *[ - ipyleaflet.Marker.element(location=k["location"], draggable=False) - for k in markers.value - ], - ], - ) - - -@solara.component -def ChatMessage(message): - with solara.Row(style={"align-items": "flex-start"}): - # Catch "messages" that are actually tool calls - if isinstance(message, dict): - icon = "mdi-map" if message["output"] == "Map updated" else "mdi-map-marker" - solara.v.Icon(children=[icon], style_="padding-top: 10px;") - solara.Markdown(message["output"]) - elif message.role == "user": - solara.Text(message.content[0].text.value, style={"font-weight": "bold;"}) - elif message.role == "assistant": - if message.content[0].text.value: - solara.v.Icon( - children=["mdi-compass-outline"], style_="padding-top: 10px;" - ) - solara.Markdown(message.content[0].text.value) - elif message.content.tool_calls: - solara.v.Icon(children=["mdi-map"], style_="padding-top: 10px;") - solara.Markdown("*Calling map functions*") - else: - solara.v.Icon( - children=["mdi-compass-outline"], style_="padding-top: 10px;" - ) - solara.Preformatted(repr(message)) - else: - solara.v.Icon(children=["mdi-compass-outline"], style_="padding-top: 10px;") - solara.Preformatted(repr(message)) - - -@solara.component -def ChatBox(children=[]): - # this uses a flexbox with column-reverse to reverse the order of the messages - # if we now also reverse the order of the messages, we get the correct order - # but the scroll position is at the bottom of the container automatically - with solara.Column(style={"flex-grow": "1"}): - solara.Style( - """ - .chat-box > :last-child{ - padding-top: 7.5vh; - } - """ - ) - # The height works effectively as `min-height`, since flex will grow the container to fill the available space - solara.Column( - style={ - "flex-grow": "1", - "overflow-y": "auto", - "height": "100px", - "flex-direction": "column-reverse", - }, - classes=["chat-box"], - children=list(reversed(children)), - ) - - -@solara.component -def ChatInterface(): - prompt = solara.use_reactive("") - run_id: solara.Reactive[str] = solara.use_reactive(None) - - # Create a thread to hold the conversation only once when this component is created - thread: Thread = solara.use_memo(openai.beta.threads.create, dependencies=[]) - - def add_message(value: str): - if value == "": - return - prompt.set("") - new_message = 
openai.beta.threads.messages.create( - thread_id=thread.id, content=value, role="user" - ) - messages.set([*messages.value, new_message]) - # this creates a new run for the thread - # also also triggers a rerender (since run_id.value changes) - # which will trigger the poll function blow to start in a thread - run_id.value = openai.beta.threads.runs.create( - thread_id=thread.id, - assistant_id="asst_RqVKAzaybZ8un7chIwPCIQdH", - tools=tools, - ).id - - def poll(): - if not run_id.value: - return - completed = False - while not completed: - try: - run = openai.beta.threads.runs.retrieve( - run_id.value, thread_id=thread.id - ) - # Above will raise NotFoundError when run creation is still in progress - except NotFoundError: - continue - if run.status == "requires_action": - tool_outputs = [] - for tool_call in run.required_action.submit_tool_outputs.tool_calls: - tool_output = assistant_tool_call(tool_call) - tool_outputs.append(tool_output) - messages.set([*messages.value, tool_output]) - openai.beta.threads.runs.submit_tool_outputs( - thread_id=thread.id, - run_id=run_id.value, - tool_outputs=tool_outputs, - ) - if run.status == "completed": - messages.set( - [ - *messages.value, - openai.beta.threads.messages.list(thread.id).data[0], - ] - ) - run_id.set(None) - completed = True - time.sleep(0.1) - - # run/restart a thread any time the run_id changes - result = solara.use_thread(poll, dependencies=[run_id.value]) - - # Create DOM for chat interface - with solara.Column(classes=["chat-interface"]): - if len(messages.value) > 0: - with ChatBox(): - for message in messages.value: - ChatMessage(message) - - with solara.Column(): - solara.InputText( - label="Where do you want to go?" - if len(messages.value) == 0 - else "Ask more question here", - value=prompt, - style={"flex-grow": "1"}, - on_value=add_message, - disabled=result.state == solara.ResultState.RUNNING, - ) - solara.ProgressLinear(result.state == solara.ResultState.RUNNING) - if result.state == solara.ResultState.ERROR: - solara.Error(repr(result.error)) - - -@solara.component -def Page(): - with solara.Column( - classes=["ui-container"], - gap="5vh", - ): - with solara.Row(justify="space-between"): - with solara.Row(gap="10px", style={"align-items": "center"}): - solara.v.Icon(children=["mdi-compass-rose"], size="36px") - solara.HTML( - tag="h2", - unsafe_innerHTML="Wanderlust", - style={"display": "inline-block"}, - ) - with solara.Row( - gap="30px", - style={"align-items": "center"}, - classes=["link-container"], - justify="end", - ): - with solara.Row(gap="5px", style={"align-items": "center"}): - solara.Text("Source Code:", style="font-weight: bold;") - # target="_blank" links are still easiest to do via ipyvuetify - with solara.v.Btn( - icon=True, - tag="a", - attributes={ - "href": "https://github.com/widgetti/wanderlust", - "title": "Wanderlust Source Code", - "target": "_blank", - }, - ): - solara.v.Icon(children=["mdi-github-circle"]) - with solara.Row(gap="5px", style={"align-items": "center"}): - solara.Text("Powered by Solara:", style="font-weight: bold;") - with solara.v.Btn( - icon=True, - tag="a", - attributes={ - "href": "https://solara.dev/", - "title": "Solara", - "target": "_blank", - }, - ): - solara.HTML( - tag="img", - attributes={ - "src": "https://solara.dev/static/public/logo.svg", - "width": "24px", - }, - ) - with solara.v.Btn( - icon=True, - tag="a", - attributes={ - "href": "https://github.com/widgetti/solara", - "title": "Solara Source Code", - "target": "_blank", - }, - ): - 
solara.v.Icon(children=["mdi-github-circle"]) - - with solara.Row( - justify="space-between", style={"flex-grow": "1"}, classes=["container-row"] - ): - ChatInterface() - with solara.Column(classes=["map-container"]): - Map() - - solara.Style(app_style) diff --git a/spaces/sonoisa/qiita_title_generator/README.md b/spaces/sonoisa/qiita_title_generator/README.md deleted file mode 100644 index 09a547e1bf282fdfe6311b65836ad6443d8f46dc..0000000000000000000000000000000000000000 --- a/spaces/sonoisa/qiita_title_generator/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Qiita_title_generator -emoji: 📚 -colorFrom: pink -colorTo: green -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/multilingual/sampled_multi_dataset.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/multilingual/sampled_multi_dataset.py deleted file mode 100644 index b0a617424ee3c5923b37796773da4c97851a16c5..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/data/multilingual/sampled_multi_dataset.py +++ /dev/null @@ -1,467 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import datetime -import hashlib -import logging -import time -from bisect import bisect_right -from collections import OrderedDict, defaultdict -from enum import Enum -from typing import List - -import numpy as np -import torch -from fairseq.data import FairseqDataset, data_utils -from fairseq.distributed import utils as distributed_utils - - -def get_time_gap(s, e): - return ( - datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s) - ).__str__() - - -logger = logging.getLogger(__name__) - - -def default_virtual_size_func(datasets, ratios, max_scale_up=1.5): - sizes = [len(d) for d in datasets] - if ratios is None: - return sum(sizes) - largest_idx = np.argmax(sizes) - largest_r = ratios[largest_idx] - largest_s = sizes[largest_idx] - # set virtual sizes relative to the largest dataset - virtual_sizes = [(r / largest_r) * largest_s for r in ratios] - vsize = sum(virtual_sizes) - max_size = sum(sizes) * max_scale_up - return int(vsize if vsize < max_size else max_size) - - -class CollateFormat(Enum): - single = 1 - ordered_dict = 2 - - -class SampledMultiDataset(FairseqDataset): - """Samples from multiple sub-datasets according to given sampling ratios. 
- Args: - datasets ( - List[~torch.utils.data.Dataset] - or OrderedDict[str, ~torch.utils.data.Dataset] - ): datasets - sampling_ratios (List[float]): list of probability of each dataset to be sampled - (default: None, which corresponds to concatenating all dataset together). - seed (int): RNG seed to use (default: 2). - epoch (int): starting epoch number (default: 1). - eval_key (str, optional): a key used at evaluation time that causes - this instance to pass-through batches from *datasets[eval_key]*. - collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or - CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures - the collater to output batches of data mixed from all sub-datasets, - and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys - of sub-datasets. - Note that not all sub-datasets will present in a single batch in both formats. - virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func). - split (str): the split of the data, e.g. 'train', 'valid' or 'test'. - shared_collater (bool): whether or not to all sub-datasets have the same collater. - shuffle (bool): whether or not to shuffle data (default: True). - """ - - def __init__( - self, - datasets, - sampling_ratios=None, - seed=2, - epoch=1, - eval_key=None, - collate_format=CollateFormat.single, - virtual_size=default_virtual_size_func, - split="", - shared_collater=False, - shuffle=True, - ): - super().__init__() - self.shared_collater = shared_collater - self.shuffle = shuffle - - if isinstance(datasets, OrderedDict): - self.keys = list(datasets.keys()) - datasets = list(datasets.values()) - elif isinstance(datasets, List): - self.keys = list(range(len(datasets))) - else: - raise AssertionError() - self.datasets = datasets - self.split = split - - self.eval_key = eval_key - if self.eval_key is not None: - self.collate_format = CollateFormat.single - else: - self.collate_format = collate_format - - self.seed = seed - self._cur_epoch = None - - self.cumulated_sizes = None - # self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset - # namely, data item i is sampled from the kth sub-dataset self.datasets[k] - # where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k] - self._cur_indices = None - - self._sizes = None - self.virtual_size_per_dataset = None - # caching properties - self._reset_cached_properties() - self.setup_sampling(sampling_ratios, virtual_size) - self.set_epoch(epoch) - - def _clean_if_not_none(self, var_list): - for v in var_list: - if v is not None: - del v - - def _reset_cached_properties(self): - self._clean_if_not_none([self._sizes, self._cur_indices]) - self._sizes = None - self._cur_indices = None - - def setup_sampling(self, sample_ratios, virtual_size): - sizes = [len(d) for d in self.datasets] - if sample_ratios is None: - # default back to concating datasets - self.sample_ratios = None - self.virtual_size = sum(sizes) - else: - if not isinstance(sample_ratios, np.ndarray): - sample_ratios = np.array(sample_ratios) - self.sample_ratios = sample_ratios - virtual_size = ( - default_virtual_size_func if virtual_size is None else virtual_size - ) - self.virtual_size = ( - virtual_size(self.datasets, self.sample_ratios) - if callable(virtual_size) - else virtual_size - ) - - def adjust_sampling(self, epoch, sampling_ratios, virtual_size): - if sampling_ratios is not None: - sampling_ratios = 
self._sync_sample_ratios(sampling_ratios) - self.setup_sampling(sampling_ratios, virtual_size) - - def _sync_sample_ratios(self, ratios): - # in case the ratios are not precisely the same across processes - # also to ensure every procresses update the ratios in the same pace - ratios = torch.DoubleTensor(ratios) - if torch.distributed.is_initialized(): - if torch.cuda.is_available(): - distributed_utils.all_reduce( - ratios.cuda(), group=distributed_utils.get_data_parallel_group() - ) - else: - distributed_utils.all_reduce( - ratios, group=distributed_utils.get_data_parallel_group() - ) - ret = ratios.cpu() - ret = ret.numpy() - return ret - - def random_choice_in_dataset(self, rng, dataset, choice_size): - if hasattr(dataset, "random_choice_in_dataset"): - return dataset.random_choice_in_dataset(rng, choice_size) - dataset_size = len(dataset) - return rng.choice( - dataset_size, choice_size, replace=(choice_size > dataset_size) - ) - - def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size): - def get_counts(sample_ratios): - counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64) - diff = virtual_size - counts.sum() - assert diff >= 0 - # due to round-offs, the size might not match the desired sizes - if diff > 0: - dataset_indices = rng.choice( - len(sample_ratios), size=diff, p=sample_ratios - ) - for i in dataset_indices: - counts[i] += 1 - return counts - - def get_in_dataset_indices(datasets, sizes, sample_ratios): - counts = get_counts(sample_ratios) - # uniformally sample desired counts for each dataset - # if the desired counts are large, sample with replacement: - indices = [ - self.random_choice_in_dataset(rng, d, c) - for c, d in zip(counts, datasets) - ] - return indices - - sizes = [len(d) for d in datasets] - if sample_ratios is None: - # default back to concating datasets - in_dataset_indices = [list(range(s)) for s in sizes] - virtual_sizes_per_dataset = sizes - else: - ratios = sample_ratios / sample_ratios.sum() - in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios) - virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices] - virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64) - cumulative_sizes = np.cumsum(virtual_sizes_per_dataset) - assert sum(virtual_sizes_per_dataset) == virtual_size - assert cumulative_sizes[-1] == virtual_size - if virtual_size < sum(sizes): - logger.warning( - f"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)})." - " If virtual size << real data size, there could be data coverage issue." 
- ) - in_dataset_indices = np.hstack(in_dataset_indices) - return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset - - def _get_dataset_and_index(self, index): - i = bisect_right(self.cumulated_sizes, index) - return i, self._cur_indices[index] - - def __getitem__(self, index): - # self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]] - # where k satisfies self.cumulated_sizes[k - 1] <= k < self.cumulated_sizes[k] - ds_idx, ds_sample_idx = self._get_dataset_and_index(index) - ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx]) - return ret - - def num_tokens(self, index): - return self.sizes[index].max() - - def num_tokens_vec(self, indices): - sizes_vec = self.sizes[np.array(indices)] - # max across all dimensions but first one - return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape)))) - - def size(self, index): - return self.sizes[index] - - def __len__(self): - return self.virtual_size - - def collater(self, samples, **extra_args): - """Merge a list of samples to form a mini-batch.""" - if len(samples) == 0: - return None - if self.collate_format == "ordered_dict": - collect_samples = [[] for _ in range(len(self.datasets))] - for (i, sample) in samples: - collect_samples[i].append(sample) - batch = OrderedDict( - [ - (self.keys[i], dataset.collater(collect_samples[i])) - for i, (key, dataset) in enumerate(zip(self.keys, self.datasets)) - if len(collect_samples[i]) > 0 - ] - ) - elif self.shared_collater: - batch = self.datasets[0].collater([s for _, s in samples]) - else: - samples_dict = defaultdict(list) - pad_to_length = ( - defaultdict(int) - if "pad_to_length" not in extra_args - else extra_args["pad_to_length"] - ) - for ds_idx, s in samples: - pad_to_length["source"] = max( - pad_to_length["source"], s["source"].size(0) - ) - if s["target"] is not None: - pad_to_length["target"] = max( - pad_to_length["target"], s["target"].size(0) - ) - samples_dict[ds_idx].append(s) - batches = [ - self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length) - for i in range(len(self.datasets)) - if len(samples_dict[i]) > 0 - ] - - def straight_data(tensors): - batch = torch.cat(tensors, dim=0) - return batch - - src_lengths = straight_data( - [b["net_input"]["src_lengths"] for b in batches] - ) - src_lengths, sort_order = src_lengths.sort(descending=True) - - def straight_order(tensors): - batch = straight_data(tensors) - return batch.index_select(0, sort_order) - - batch = { - "id": straight_order([b["id"] for b in batches]), - "nsentences": sum(b["nsentences"] for b in batches), - "ntokens": sum(b["ntokens"] for b in batches), - "net_input": { - "src_tokens": straight_order( - [b["net_input"]["src_tokens"] for b in batches] - ), - "src_lengths": src_lengths, - }, - "target": straight_order([b["target"] for b in batches]) - if batches[0]["target"] is not None - else None, - } - if "prev_output_tokens" in batches[0]["net_input"]: - batch["net_input"]["prev_output_tokens"] = straight_order( - [b["net_input"]["prev_output_tokens"] for b in batches] - ) - if "src_lang_id" in batches[0]["net_input"]: - batch["net_input"]["src_lang_id"] = straight_order( - [b["net_input"]["src_lang_id"] for b in batches] - ) - if "tgt_lang_id" in batches[0]: - batch["tgt_lang_id"] = straight_order( - [b["tgt_lang_id"] for b in batches] - ) - return batch - - @property - def sizes(self): - if self._sizes is not None: - return self._sizes - start_time = time.time() - in_sub_dataset_indices = [ - self._cur_indices[ - 0 if i == 0 else self.cumulated_sizes[i - 
1] : self.cumulated_sizes[i] - ] - for i in range(len(self.datasets)) - ] - sub_dataset_sizes = [ - d.sizes[indices] - for d, indices in zip(self.datasets, in_sub_dataset_indices) - ] - self._sizes = np.vstack(sub_dataset_sizes) - logger.info(f"sizes() calling time: {get_time_gap(start_time, time.time())}") - return self._sizes - - def ordered_indices(self): - if self.shuffle: - indices = np.random.permutation(len(self)) - else: - indices = np.arange(len(self)) - - sizes = self.sizes - tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None - src_sizes = ( - sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes - ) - - # sort by target length, then source length - if tgt_sizes is not None: - indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")] - sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")] - return sort_indices - - def prefetch(self, indices): - prefetch_indices = [[] for _ in range(len(self.datasets))] - for i in indices: - ds_idx, ds_sample_idx = self._get_dataset_and_index(i) - prefetch_indices[ds_idx].append(ds_sample_idx) - for i in range(len(prefetch_indices)): - self.datasets[i].prefetch(prefetch_indices[i]) - - @property - def can_reuse_epoch_itr_across_epochs(self): - return False - - def set_epoch(self, epoch): - super().set_epoch(epoch) - if epoch == self._cur_epoch: - # re-enter so return - return - for d in self.datasets: - if hasattr(d, "set_epoch"): - d.set_epoch(epoch) - self._cur_epoch = epoch - self._establish_virtual_datasets() - - def _establish_virtual_datasets(self): - if self.sample_ratios is None and self._cur_indices is not None: - # not a samping dataset, no need to resample if indices are already established - return - self._reset_cached_properties() - - start_time = time.time() - # Generate a weighted sample of indices as a function of the - # random seed and the current epoch. - rng = np.random.RandomState( - [ - int( - hashlib.sha1( - str(self.__class__.__name__).encode("utf-8") - ).hexdigest(), - 16, - ) - % (2 ** 32), - self.seed % (2 ** 32), # global seed - self._cur_epoch, # epoch index, - ] - ) - self._clean_if_not_none( - [self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes] - ) - self._sizes = None - - indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices( - rng, self.datasets, self.sample_ratios, self.virtual_size - ) - self._cur_indices = indices - self.cumulated_sizes = cumulated_sizes - self.virtual_size_per_dataset = virtual_size_per_dataset - - raw_sizes = [len(d) for d in self.datasets] - sampled_sizes = self.virtual_size_per_dataset - logger.info( - f"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; " - f"raw total size: {sum(raw_sizes)}" - ) - logger.info( - f"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; " - f"resampled total size: {sum(sampled_sizes)}" - ) - if self.sample_ratios is not None: - logger.info( - f"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}" - ) - else: - logger.info(f"[{self.split}] A concat dataset") - logger.info( - f"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}" - ) - - def filter_indices_by_size(self, indices, max_sizes): - """Filter a list of sample indices. Remove those that are longer - than specified in max_sizes. 
- - Args: - indices (np.array): original array of sample indices - max_sizes (int or list[int] or tuple[int]): max sample size, - can be defined separately for src and tgt (then list or tuple) - - Returns: - np.array: filtered sample array - list: list of removed indices - """ - sizes = self.sizes - tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None - src_sizes = ( - sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes - ) - - return data_utils.filter_paired_dataset_indices_by_size( - src_sizes, tgt_sizes, indices, max_sizes - ) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/lr_scheduler/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/lr_scheduler/__init__.py deleted file mode 100644 index 5b3dbc023aa4a6f7bfb8403b8204d71ca432f79c..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/lr_scheduler/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -"""isort:skip_file""" - -import importlib -import os - -from fairseq import registry -from fairseq.optim.lr_scheduler.fairseq_lr_scheduler import ( # noqa - FairseqLRScheduler, - LegacyFairseqLRScheduler, -) -from omegaconf import DictConfig - - -( - build_lr_scheduler_, - register_lr_scheduler, - LR_SCHEDULER_REGISTRY, - LR_SCHEDULER_DATACLASS_REGISTRY, -) = registry.setup_registry( - "--lr-scheduler", base_class=FairseqLRScheduler, default="fixed" -) - - -def build_lr_scheduler(cfg: DictConfig, optimizer): - return build_lr_scheduler_(cfg, optimizer) - - -# automatically import any Python files in the optim/lr_scheduler/ directory -for file in sorted(os.listdir(os.path.dirname(__file__))): - if file.endswith(".py") and not file.startswith("_"): - file_name = file[: file.find(".py")] - importlib.import_module("fairseq.optim.lr_scheduler." 
+ file_name) diff --git a/spaces/srivarshan/argumentation-quality-analyzer/preprocess.py b/spaces/srivarshan/argumentation-quality-analyzer/preprocess.py deleted file mode 100644 index 4d0b8e55b6842c45079d265f287e749bed528842..0000000000000000000000000000000000000000 --- a/spaces/srivarshan/argumentation-quality-analyzer/preprocess.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -from nltk.corpus.reader import pickle -import pandas as pd -import numpy as np -from nltk.corpus import stopwords -from nltk.stem import SnowballStemmer - - -def clean_text(text): - stop_words = set(stopwords.words("english")) - # english_stopwords = stopwords.words("english") - english_stemmer = SnowballStemmer("english") - text = text.replace('', '') # Remove - text = re.sub(r'[^\w]', ' ', text) # Remove symbols - text = re.sub(r'[ ]{2,}', ' ', text) # Remove extra spaces - text = re.sub(r'[ \t]+$', '', text) # Remove trailing white spaces - tokens = [] - for token in text.split(): - if token not in stop_words: - token = english_stemmer.stem(token) - tokens.append(token) - return " ".join(tokens) - -def preprocess_pipeline(text): - return clean_text(text) - -def vectorizer(text): - count_vectorizer = pickle.load(open("vectorizers/count_vectorizer.pkl", "rb")) - return count_vectorizer.transform(text) - diff --git a/spaces/stomexserde/gpt4-ui/Examples/Aces Of The Luftwaffe - Squadron Extended Edition Full Crack [portable] __HOT__.md b/spaces/stomexserde/gpt4-ui/Examples/Aces Of The Luftwaffe - Squadron Extended Edition Full Crack [portable] __HOT__.md deleted file mode 100644 index 950312e13ef2b0a0f2199febc774a2109056e16d..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Aces Of The Luftwaffe - Squadron Extended Edition Full Crack [portable] __HOT__.md +++ /dev/null @@ -1,21 +0,0 @@ -
                  -

                  Aces Of The Luftwaffe - Squadron Extended Edition: A Thrilling Shoot 'Em Up Game

                  - -

                  If you are looking for a fun and exciting game that will keep you on the edge of your seat, then you should check out Aces Of The Luftwaffe - Squadron Extended Edition. This game is a vertical scrolling shoot 'em up that features a captivating and fully voiced narrative, as well as stunning graphics and sound effects.

                  - -

In this game, you will take control of a squadron of four pilots who have to face the German aerial forces in World War II. You will have to dodge bullets, collect power-ups, use special abilities and defeat waves of enemies as you progress through the story. You will also be able to upgrade each member of your squadron with their personal skill tree as you level up.

                  -




                  - -

                  Aces Of The Luftwaffe - Squadron Extended Edition is the ultimate version of the game, as it includes the base game and all the DLCs that add new missions, characters, enemies and bosses. You will be able to enjoy hours of gameplay with this game, as it offers different modes and difficulties to suit your preferences.

                  - -

                  If you are looking for a portable version of the game, you can also download Aces Of The Luftwaffe - Squadron Extended Edition Full Crack [portable] from our website. This version allows you to play the game without installing it on your PC, so you can take it with you wherever you go. All you need is a USB drive and a compatible device.

                  - -

                  Aces Of The Luftwaffe - Squadron Extended Edition is a game that will appeal to fans of shoot 'em up games, as well as anyone who enjoys a good story and a challenge. You can get it now from Steam or from our website for a reasonable price. Don't miss this opportunity to experience one of the best games of its genre.

                  - -

                  One of the best features of Aces Of The Luftwaffe - Squadron Extended Edition is the gameplay. This game offers a thrilling and challenging experience that will test your skills and reflexes. You will have to maneuver your plane through different environments, such as cities, forests, deserts and oceans, while avoiding enemy fire and obstacles. You will also have to face different types of enemies, such as planes, tanks, ships and bosses, each with their own patterns and behaviors.

                  - -

                  The game also has a co-op mode that allows you to play with up to three friends on the same screen. You can choose from four different characters, each with their own personality and abilities. You can also customize your planes with different weapons and upgrades that you can unlock as you play. The co-op mode adds a lot of fun and replay value to the game, as you can cooperate or compete with your friends.

                  - -

                  If you want to see some gameplay footage of Aces Of The Luftwaffe - Squadron Extended Edition, you can check out some of the videos on YouTube[^1^] [^2^] [^3^]. You will be able to see how the game looks and plays on different platforms, such as PC, PS4 and Switch. You will also be able to hear the voice acting and the music that enhance the atmosphere of the game.

                  -
                  -
                  \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Bosch Esi Tronic Keygen 1q.2013.rar [crack _BEST_ED].md b/spaces/stomexserde/gpt4-ui/Examples/Bosch Esi Tronic Keygen 1q.2013.rar [crack _BEST_ED].md deleted file mode 100644 index b60c4cadaad44942aa7f902fd0e808eac47c65f0..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Bosch Esi Tronic Keygen 1q.2013.rar [crack _BEST_ED].md +++ /dev/null @@ -1,36 +0,0 @@ - -

                  How to Install and Activate Bosch ESI Tronic 2013.1 Software

                  -

Bosch ESI Tronic is one of the leading diagnostic software suites for the maintenance, diagnosis and repair of vehicles. It covers a wide range of vehicles worldwide and provides access to vehicle data, wiring diagrams, repair guides, manuals and more. However, to use this software, you need to install it on your computer and activate it with a valid activation code.

                  -

                  In this article, we will show you how to install and activate Bosch ESI Tronic 2013.1 software using a cracked rar file that contains the keygen and the patch. This file is called Bosch ESI Tronic Keygen 1q.2013.rar [CRACKED] and you can download it from various online sources. However, we do not recommend using cracked software as it may contain viruses or malware that can harm your computer or compromise your data. Use it at your own risk.

                  -




                  -

                  Step 1: Download and Extract the Rar File

                  -

                  The first step is to download the Bosch ESI Tronic Keygen 1q.2013.rar [CRACKED] file from a reliable source. You can use a torrent client or a direct download link to get the file. The file size is about 20 GB, so make sure you have enough space on your hard drive and a stable internet connection.

                  -

                  After downloading the file, you need to extract it using a program like WinRAR or 7-Zip. You will get a folder with four subfolders: DVD-1, DVD-2, DVD-3 and DVD-4. These are the installation DVDs for the Bosch ESI Tronic 2013.1 software.

                  -

                  Step 2: Install the Software

                  -

                  The next step is to install the software on your computer. You need to have a Bosch diagnostic device from the KTS series connected to your computer via USB or Bluetooth. You also need to have a valid license for the software or a subscription to ESI [tronic] 2.0 Online service.

                  -

                  To install the software, follow these steps:

                  -
1. Insert DVD-1 into your DVD drive and run Setup.exe as administrator.
2. Follow the instructions on the screen and choose your language and installation path.
3. When prompted, enter your license number or login credentials for the ESI [tronic] 2.0 Online service.
4. Wait for the installation to complete and restart your computer if required.
5. Repeat the same process for DVD-2, DVD-3 and DVD-4.
-

                  After installing all four DVDs, you should have the Bosch ESI Tronic 2013.1 software on your computer.

                  -

                  Step 3: Activate the Software

                  -

                  The final step is to activate the software using the keygen and the patch that are included in the Bosch ESI Tronic Keygen 1q.2013.rar [CRACKED] file. To do this, follow these steps:

                  -

                  -
1. Open the folder where you extracted the rar file and run ESI 2013.3 KG v 1.0_2.exe as administrator.
2. Select your language and click OK.
3. Enter your hardware ID (you can find it in the ESI [tronic] software under Help -> About) and click Generate.
4. Copy the generated activation code and paste it into the ESI [tronic] software under Help -> Activation -> Enter Code.
5. Click OK and close the keygen.
6. Open the folder where you extracted the rar file and run Patch.exe as administrator.
7. Select your language and click OK.
8. Select your installation path (usually C:\ESI) and click Patch.
9. Wait for the patching process to finish and close the patch.
-

Congratulations! You have successfully installed and activated Bosch ESI Tronic 2013.1 software using Bosch ESI Tronic Keygen 1q.2013.rar [CRACKED]. You can now use the software to diagnose and repair vehicles.

                  cec2833e83
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/JotunPaintsCatalogueDownloadPdf.md b/spaces/stomexserde/gpt4-ui/Examples/JotunPaintsCatalogueDownloadPdf.md deleted file mode 100644 index 8cacdf39c5ec9715f4930cca4042c1df763a75e4..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/JotunPaintsCatalogueDownloadPdf.md +++ /dev/null @@ -1,33 +0,0 @@ - -

                  How to Download Jotun Paints Catalogue PDF for Free

                  -

                  If you are looking for a reliable and high-quality paint brand for your home or project, you might want to check out Jotun Paints. Jotun Paints is a leading manufacturer of decorative and protective coatings, with a wide range of products for different surfaces and environments. Whether you need interior or exterior paints, wood stains, primers, or special coatings, Jotun Paints has something for you.

                  -

But how can you find the right product and colour for your needs? One way is to download the Jotun Paints Catalogue PDF for free from their website. The catalogue contains detailed information about their products, including technical data sheets, safety data sheets, application guides, colour charts, and more. You can also browse through their latest collections and trends, such as the Stories Collection 2023, which features colours that inspire positivity and creativity.

                  -

                  JotunPaintsCatalogueDownloadPdf


                  Download Ziphttps://urlgoal.com/2uIc5e



                  -

To download the Jotun Paints Catalogue PDF for free, follow these simple steps (a scripted version is sketched after the list):

                  -
1. Go to https://www.jotun.com/me-en/decorative/datasheets-search. This is the datasheets search page of Jotun Paints Middle East.
2. Type in the product name or code that you are interested in. For example, if you want to see the catalogue for Jotashield Antifade Colours, type in "Jotashield Antifade Colours" or "1768".
3. Select the document type that you want to download. You can choose from technical data sheets (TDS), safety data sheets (SDS), application guides (AG), or factory delivery vouchers (FDV).
4. Select the country and language that you prefer. You can choose from Arabic, English, French, or Swahili.
5. Click on the "Search" button. You will see a list of results that match your criteria.
6. Click on the "Download" button next to the document that you want to download. The PDF file will open in a new tab or window.
7. Save the PDF file to your device or print it out if you wish.
-
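For repeated downloads, the manual steps above can be automated once you know the document link. Here is a minimal Python sketch using the requests library; the PDF URL is a hypothetical placeholder, since the real link is whatever the "Download" button on the search results page points to.

```python
import requests

# Placeholder URL; copy the real link from the search results page.
pdf_url = "https://www.jotun.com/path/to/datasheet.pdf"

resp = requests.get(pdf_url, timeout=30)
resp.raise_for_status()

# Write the datasheet to a local PDF file.
with open("jotun_datasheet.pdf", "wb") as f:
    f.write(resp.content)
```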

                  That's it! You have successfully downloaded the Jotun Paints Catalogue PDF for free. You can now browse through their products and colours and find the best one for your project.

                  -

If you have any questions or need more assistance, you can contact Jotun Paints customer service at +971 4 339 5000 or email them at jotun.me@jotun.com. You can also visit their website at https://www.jotun.com/me/en/decorative/ for more information and inspiration.

                  - -

                  Why Choose Jotun Paints?

                  -

Jotun Paints is not just a paint brand; it is a global leader in the coatings industry. With over 100 years of experience and innovation, Jotun Paints has developed products that meet the highest standards of quality, performance, and sustainability. Jotun Paints has a presence in more than 100 countries and serves customers in various sectors, such as marine, protective, decorative, and powder coatings.

                  -

                  When you choose Jotun Paints, you are choosing a brand that cares about your needs and the environment. Jotun Paints offers products that are eco-friendly, low VOC, water-based, and easy to apply. Jotun Paints also provides colour consultancy and technical support to help you achieve the best results for your project.

                  -

                  -

                  Some of the benefits of using Jotun Paints are:

                  -
• They have a wide range of colours and finishes to suit any style and mood.
• They have products that are specially designed for different surfaces and climates, such as concrete, wood, metal, drywall, humid, hot, or cold.
• They have products that are durable, weather-resistant, fade-resistant, anti-fungal, anti-bacterial, anti-corrosive, and anti-graffiti.
• They have products that are easy to apply, clean, and maintain.
• They have products that are certified by international standards and organizations, such as ISO, ASTM, Green Seal, Green Label, Green Guard, etc.
-

                  With Jotun Paints, you can transform your space into a beautiful and comfortable place that reflects your personality and vision. Whether you want to create a cozy living room, a vibrant kitchen, a relaxing bedroom, or a professional office, Jotun Paints has the perfect product and colour for you.

                  e93f5a0c3f
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/sub314xxl/StyleGAN-XL/style.css b/spaces/sub314xxl/StyleGAN-XL/style.css deleted file mode 100644 index 8dd6cf3081735167994093f71d1d0c80d1a7d144..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/StyleGAN-XL/style.css +++ /dev/null @@ -1,11 +0,0 @@ -h1 { - text-align: center; -} -div#result { - max-width: 600px; - max-height: 600px; -} -img#visitor-badge { - display: block; - margin: auto; -} diff --git a/spaces/sub314xxl/radames-kandinsky-2-1-img2img/README.md b/spaces/sub314xxl/radames-kandinsky-2-1-img2img/README.md deleted file mode 100644 index c931d74992abddee10e6745ed0990da2f050ba95..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/radames-kandinsky-2-1-img2img/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Radames Kandinsky 2 1 Img2img -emoji: 📉 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/cpp/cppipc/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/suchun/chatGPT_acdemic/crazy_functions/test_project/cpp/cppipc/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. 
- */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, 
std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) 
std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, 
std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/sunshineatnoon/TextureScraping/swapae/models/networks/classifier.py b/spaces/sunshineatnoon/TextureScraping/swapae/models/networks/classifier.py deleted file mode 100644 index 86b2eab0c31ef8999536232eaf405229dd3b0953..0000000000000000000000000000000000000000 --- a/spaces/sunshineatnoon/TextureScraping/swapae/models/networks/classifier.py +++ /dev/null @@ -1,28 +0,0 @@ -import torch -from swapae.models.networks import BaseNetwork -from swapae.models.networks.pyramidnet import PyramidNet - - -class PyramidNetClassifier(BaseNetwork): - @staticmethod - def modify_commandline_options(parser, is_train): - parser.add_argument("--pyramid_alpha", type=int, default=240) - parser.add_argument("--pyramid_depth", type=int, default=200) - return parser - - def __init__(self, opt): - super().__init__(opt) - assert "cifar" in opt.dataset_mode - self.net = PyramidNet( - opt.dataset_mode, depth=opt.pyramid_depth, alpha=opt.pyramid_alpha, - num_classes=opt.num_classes, bottleneck=True) - - mean = torch.tensor([x / 127.5 - 1.0 for x in [125.3, 123.0, 113.9]], dtype=torch.float) - std = torch.tensor([x / 127.5 for x in [63.0, 62.1, 66.7]], dtype=torch.float) - self.register_buffer("mean", mean[None, :, None, None]) - self.register_buffer("std", std[None, :, None, None]) - - def forward(self, x): - x = (x - self.mean) / self.std - return self.net(x) - diff --git a/spaces/supertori/files/main.py b/spaces/supertori/files/main.py deleted file mode 100644 index 50ff4bf849790c36b64db7053636eeb5f3ddf560..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/main.py +++ /dev/null @@ -1,468 +0,0 @@ -''' -Modified version for full net lora -(Lora for ResBlock and up/down sample block) -''' -import os, sys -import re -import torch - -from modules import shared, devices, sd_models -import lora -from locon_compvis import LoConModule, LoConNetworkCompvis, create_network_and_apply_compvis - - -try: - ''' - Hijack Additional Network extension - ''' - # skip addnet since don't support new version - raise - now_dir = os.path.dirname(os.path.abspath(__file__)) - addnet_path = os.path.join(now_dir, '..', '..', 'sd-webui-additional-networks/scripts') - sys.path.append(addnet_path) - import lora_compvis - import scripts - scripts.lora_compvis = lora_compvis - scripts.lora_compvis.LoRAModule = LoConModule - scripts.lora_compvis.LoRANetworkCompvis = LoConNetworkCompvis - scripts.lora_compvis.create_network_and_apply_compvis = create_network_and_apply_compvis - print('LoCon Extension hijack addnet extension successfully') -except: - print('Additional Network extension not installed, Only hijack built-in lora') - - -''' -Hijack sd-webui LoRA -''' -re_digits = re.compile(r"\d+") - -re_unet_conv_in = re.compile(r"lora_unet_conv_in(.+)") -re_unet_conv_out = re.compile(r"lora_unet_conv_out(.+)") -re_unet_time_embed = re.compile(r"lora_unet_time_embedding_linear_(\d+)(.+)") - -re_unet_down_blocks = re.compile(r"lora_unet_down_blocks_(\d+)_attentions_(\d+)_(.+)") -re_unet_mid_blocks = re.compile(r"lora_unet_mid_block_attentions_(\d+)_(.+)") -re_unet_up_blocks = 
re.compile(r"lora_unet_up_blocks_(\d+)_attentions_(\d+)_(.+)") - -re_unet_down_blocks_res = re.compile(r"lora_unet_down_blocks_(\d+)_resnets_(\d+)_(.+)") -re_unet_mid_blocks_res = re.compile(r"lora_unet_mid_block_resnets_(\d+)_(.+)") -re_unet_up_blocks_res = re.compile(r"lora_unet_up_blocks_(\d+)_resnets_(\d+)_(.+)") - -re_unet_downsample = re.compile(r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv(.+)") -re_unet_upsample = re.compile(r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv(.+)") - -re_text_block = re.compile(r"lora_te_text_model_encoder_layers_(\d+)_(.+)") - - -def convert_diffusers_name_to_compvis(key): - def match(match_list, regex): - r = re.match(regex, key) - if not r: - return False - - match_list.clear() - match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()]) - return True - - m = [] - - if match(m, re_unet_conv_in): - return f'diffusion_model_input_blocks_0_0{m[0]}' - - if match(m, re_unet_conv_out): - return f'diffusion_model_out_2{m[0]}' - - if match(m, re_unet_time_embed): - return f"diffusion_model_time_embed_{m[0]*2-2}{m[1]}" - - if match(m, re_unet_down_blocks): - return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[1]}_1_{m[2]}" - - if match(m, re_unet_mid_blocks): - return f"diffusion_model_middle_block_1_{m[1]}" - - if match(m, re_unet_up_blocks): - return f"diffusion_model_output_blocks_{m[0] * 3 + m[1]}_1_{m[2]}" - - if match(m, re_unet_down_blocks_res): - block = f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[1]}_0_" - if m[2].startswith('conv1'): - return f"{block}in_layers_2{m[2][len('conv1'):]}" - elif m[2].startswith('conv2'): - return f"{block}out_layers_3{m[2][len('conv2'):]}" - elif m[2].startswith('time_emb_proj'): - return f"{block}emb_layers_1{m[2][len('time_emb_proj'):]}" - elif m[2].startswith('conv_shortcut'): - return f"{block}skip_connection{m[2][len('conv_shortcut'):]}" - - if match(m, re_unet_mid_blocks_res): - block = f"diffusion_model_middle_block_{m[0]*2}_" - if m[1].startswith('conv1'): - return f"{block}in_layers_2{m[1][len('conv1'):]}" - elif m[1].startswith('conv2'): - return f"{block}out_layers_3{m[1][len('conv2'):]}" - elif m[1].startswith('time_emb_proj'): - return f"{block}emb_layers_1{m[1][len('time_emb_proj'):]}" - elif m[1].startswith('conv_shortcut'): - return f"{block}skip_connection{m[1][len('conv_shortcut'):]}" - - if match(m, re_unet_up_blocks_res): - block = f"diffusion_model_output_blocks_{m[0] * 3 + m[1]}_0_" - if m[2].startswith('conv1'): - return f"{block}in_layers_2{m[2][len('conv1'):]}" - elif m[2].startswith('conv2'): - return f"{block}out_layers_3{m[2][len('conv2'):]}" - elif m[2].startswith('time_emb_proj'): - return f"{block}emb_layers_1{m[2][len('time_emb_proj'):]}" - elif m[2].startswith('conv_shortcut'): - return f"{block}skip_connection{m[2][len('conv_shortcut'):]}" - - if match(m, re_unet_downsample): - return f"diffusion_model_input_blocks_{m[0]*3+3}_0_op{m[1]}" - - if match(m, re_unet_upsample): - return f"diffusion_model_output_blocks_{m[0]*3 + 2}_{1+(m[0]!=0)}_conv{m[1]}" - - if match(m, re_text_block): - return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}" - - return key - - -class LoraOnDisk: - def __init__(self, name, filename): - self.name = name - self.filename = filename - - -class LoraModule: - def __init__(self, name): - self.name = name - self.multiplier = 1.0 - self.modules = {} - self.mtime = None - - -class FakeModule(torch.nn.Module): - def __init__(self, weight, func): - super().__init__() - self.weight = weight - self.func = func - - def forward(self, 
x): - return self.func(x) - - -class FullModule: - def __init__(self): - self.weight = None - self.alpha = None - self.op = None - self.extra_args = {} - self.shape = None - self.up = None - - def down(self, x): - return x - - def inference(self, x): - return self.op(x, self.weight, **self.extra_args) - - -class LoraUpDownModule: - def __init__(self): - self.up_model = None - self.mid_model = None - self.down_model = None - self.alpha = None - self.dim = None - self.op = None - self.extra_args = {} - self.shape = None - self.bias = None - self.up = None - - def down(self, x): - return x - - def inference(self, x): - if hasattr(self, 'bias') and isinstance(self.bias, torch.Tensor): - out_dim = self.up_model.weight.size(0) - rank = self.down_model.weight.size(0) - rebuild_weight = ( - self.up_model.weight.reshape(out_dim, -1) @ self.down_model.weight.reshape(rank, -1) - + self.bias - ).reshape(self.shape) - return self.op( - x, rebuild_weight, - **self.extra_args - ) - else: - if self.mid_model is None: - return self.up_model(self.down_model(x)) - else: - return self.up_model(self.mid_model(self.down_model(x))) - - -def pro3(t, wa, wb): - temp = torch.einsum('i j k l, j r -> i r k l', t, wb) - return torch.einsum('i j k l, i r -> r j k l', temp, wa) - - -class LoraHadaModule: - def __init__(self): - self.t1 = None - self.w1a = None - self.w1b = None - self.t2 = None - self.w2a = None - self.w2b = None - self.alpha = None - self.dim = None - self.op = None - self.extra_args = {} - self.shape = None - self.bias = None - self.up = None - - def down(self, x): - return x - - def inference(self, x): - if hasattr(self, 'bias') and isinstance(self.bias, torch.Tensor): - bias = self.bias - else: - bias = 0 - - if self.t1 is None: - return self.op( - x, - ((self.w1a @ self.w1b) * (self.w2a @ self.w2b) + bias).view(self.shape), - **self.extra_args - ) - else: - return self.op( - x, - (pro3(self.t1, self.w1a, self.w1b) - * pro3(self.t2, self.w2a, self.w2b) + bias).view(self.shape), - **self.extra_args - ) - - -CON_KEY = { - "lora_up.weight", - "lora_down.weight", - "lora_mid.weight" -} -HADA_KEY = { - "hada_t1", - "hada_w1_a", - "hada_w1_b", - "hada_t2", - "hada_w2_a", - "hada_w2_b", -} - -def load_lora(name, filename): - lora = LoraModule(name) - lora.mtime = os.path.getmtime(filename) - - sd = sd_models.read_state_dict(filename) - - keys_failed_to_match = [] - - for key_diffusers, weight in sd.items(): - fullkey = convert_diffusers_name_to_compvis(key_diffusers) - key, lora_key = fullkey.split(".", 1) - - sd_module = shared.sd_model.lora_layer_mapping.get(key, None) - if sd_module is None: - keys_failed_to_match.append(key_diffusers) - continue - - lora_module = lora.modules.get(key, None) - if lora_module is None: - lora_module = LoraUpDownModule() - lora.modules[key] = lora_module - - if lora_key == "alpha": - lora_module.alpha = weight.item() - continue - - if lora_key == "diff": - weight = weight.to(device=devices.device, dtype=devices.dtype) - weight.requires_grad_(False) - lora_module = FullModule() - lora.modules[key] = lora_module - lora_module.weight = weight - lora_module.alpha = weight.size(1) - lora_module.up = FakeModule( - weight, - lora_module.inference - ) - lora_module.up.to(device=devices.device, dtype=devices.dtype) - if len(weight.shape)==2: - lora_module.op = torch.nn.functional.linear - lora_module.extra_args = { - 'bias': None - } - else: - lora_module.op = torch.nn.functional.conv2d - lora_module.extra_args = { - 'stride': sd_module.stride, - 'padding': sd_module.padding, - 
'bias': None - } - continue - - if 'bias_' in lora_key: - if lora_module.bias is None: - lora_module.bias = [None, None, None] - if 'bias_indices' == lora_key: - lora_module.bias[0] = weight - elif 'bias_values' == lora_key: - lora_module.bias[1] = weight - elif 'bias_size' == lora_key: - lora_module.bias[2] = weight - - if all((i is not None) for i in lora_module.bias): - print('build bias') - lora_module.bias = torch.sparse_coo_tensor( - lora_module.bias[0], - lora_module.bias[1], - tuple(lora_module.bias[2]), - ).to(device=devices.device, dtype=devices.dtype) - lora_module.bias.requires_grad_(False) - continue - - if lora_key in CON_KEY: - if type(sd_module) == torch.nn.Linear: - weight = weight.reshape(weight.shape[0], -1) - module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) - lora_module.op = torch.nn.functional.linear - elif type(sd_module) == torch.nn.Conv2d: - if lora_key == "lora_down.weight": - if weight.shape[2] != 1 or weight.shape[3] != 1: - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], sd_module.kernel_size, sd_module.stride, sd_module.padding, bias=False) - else: - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) - elif lora_key == "lora_mid.weight": - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], sd_module.kernel_size, sd_module.stride, sd_module.padding, bias=False) - elif lora_key == "lora_up.weight": - module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) - lora_module.op = torch.nn.functional.conv2d - lora_module.extra_args = { - 'stride': sd_module.stride, - 'padding': sd_module.padding - } - else: - assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}' - - lora_module.shape = sd_module.weight.shape - with torch.no_grad(): - module.weight.copy_(weight) - - module.to(device=devices.device, dtype=devices.dtype) - module.requires_grad_(False) - - if lora_key == "lora_up.weight": - lora_module.up_model = module - lora_module.up = FakeModule( - lora_module.up_model.weight, - lora_module.inference - ) - elif lora_key == "lora_mid.weight": - lora_module.mid_model = module - elif lora_key == "lora_down.weight": - lora_module.down_model = module - lora_module.dim = weight.shape[0] - elif lora_key in HADA_KEY: - if type(lora_module) != LoraHadaModule: - alpha = lora_module.alpha - bias = lora_module.bias - lora_module = LoraHadaModule() - lora_module.alpha = alpha - lora_module.bias = bias - lora.modules[key] = lora_module - lora_module.shape = sd_module.weight.shape - - weight = weight.to(device=devices.device, dtype=devices.dtype) - weight.requires_grad_(False) - - if lora_key == 'hada_w1_a': - lora_module.w1a = weight - if lora_module.up is None: - lora_module.up = FakeModule( - lora_module.w1a, - lora_module.inference - ) - elif lora_key == 'hada_w1_b': - lora_module.w1b = weight - lora_module.dim = weight.shape[0] - elif lora_key == 'hada_w2_a': - lora_module.w2a = weight - elif lora_key == 'hada_w2_b': - lora_module.w2b = weight - elif lora_key == 'hada_t1': - lora_module.t1 = weight - lora_module.up = FakeModule( - lora_module.t1, - lora_module.inference - ) - elif lora_key == 'hada_t2': - lora_module.t2 = weight - - if type(sd_module) == torch.nn.Linear: - lora_module.op = torch.nn.functional.linear - elif type(sd_module) == torch.nn.Conv2d: - lora_module.op = torch.nn.functional.conv2d - lora_module.extra_args = { - 'stride': sd_module.stride, - 'padding': sd_module.padding - } - else: - assert False, f'Lora layer 
{key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}' - - else: - assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha' - - if len(keys_failed_to_match) > 0: - print(shared.sd_model.lora_layer_mapping) - print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}") - - return lora - - -def lora_forward(module, input, res): - if len(lora.loaded_loras) == 0: - return res - - lora_layer_name = getattr(module, 'lora_layer_name', None) - for lora_m in lora.loaded_loras: - module = lora_m.modules.get(lora_layer_name, None) - if module is not None and lora_m.multiplier: - if hasattr(module, 'up'): - scale = lora_m.multiplier * (module.alpha / module.up.weight.size(1) if module.alpha else 1.0) - else: - scale = lora_m.multiplier * (module.alpha / module.dim if module.alpha else 1.0) - - if shared.opts.lora_apply_to_outputs and res.shape == input.shape: - x = res - else: - x = input - - if hasattr(module, 'inference'): - res = res + module.inference(x) * scale - elif hasattr(module, 'up'): - res = res + module.up(module.down(x)) * scale - else: - raise NotImplementedError( - "Your settings, extensions or models are not compatible with each other." - ) - return res - - -lora.convert_diffusers_name_to_compvis = convert_diffusers_name_to_compvis -lora.load_lora = load_lora -lora.lora_forward = lora_forward -print('LoCon Extension hijack built-in lora successfully') \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Crack No Cd Pour Cossack 2.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Crack No Cd Pour Cossack 2.md deleted file mode 100644 index 8046052350a4735302ae39cf854934dbf356b5b1..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Crack No Cd Pour Cossack 2.md +++ /dev/null @@ -1,26 +0,0 @@ -

                  Crack No Cd Pour Cossack 2


                  Download Zip ✪✪✪ https://cinurl.com/2uEZ2g



- -Release
-
"Новая волна" (New wave) was first released in 1974 in the Soviet Union, in North America, Japan and South Korea, and in many parts of Europe (except Denmark) on, as well as on other dates (see below).
-
The game was made available as an audio cassette in 1979, but this cassette was probably not exported outside the Soviet Union.
-
The CD-ROM version, in Russian and English, was first released in Russia and European parts of the USSR on. This version is also available in the Russian Federation, Belarus, Ukraine, Uzbekistan, Azerbaijan and Kazakhstan.
-
Audio
-
The audio cassette release was produced by Goskino. The recording contains four separate programs of music, each containing two to four choruses. The main programs are called "Наши мирные жизни" (Our peaceful lives), "Русская война" (Russian war), "Эмиграция" (Emigration) and "Новое путешествие" (New trip). A review in Music, Lifestyle and Entertainment calls the main programs "a slow down, reflect and think about various aspects of life" and "a gentle manifestation of the process of change that changed the system of the life of the Cossacks". All music was composed by Mikhail Gershenzon.
-
Video
-
The video release has three parts:
-
"Новая волна" (New wave) contains the original video game on a monochrome VHS tape (parts 1 and 2) and on a color VHS tape (part 3). This release also includes the three main choruses of the audio cassette release. It was released on.
-
The video tape and color VHS releases were produced by Rector Studio. The video is in color and is presented in 16:9 widescreen.
-
"Сборник весенних царских событий" (The collection of spring coronation events) contains the video game on VHS and audio cassette, as well as the music cassettes from
-
4fefd39f24
                  -
                  -
                  -

                  diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Windows 10 Enterprise Indir.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Windows 10 Enterprise Indir.md deleted file mode 100644 index 472de4f2f7748774891df48a0f127a829016d601..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Windows 10 Enterprise Indir.md +++ /dev/null @@ -1,21 +0,0 @@ -

                  windows 10 enterprise indir


                  Download Ziphttps://cinurl.com/2uEZ51



- -You must be a member of the Windows Insider Program to access this page.
-
Name: Microsoft Office 2016 Pro
Language: Russian
Operating system: Windows
Size: 1.18 GB
Download Microsoft Office 2016 Pro: Free
-
8a78ff9644
                  -
                  -
                  -

                  diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Windows 10 Gamer Edition Pro Activate With Key HOT.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Windows 10 Gamer Edition Pro Activate With Key HOT.md deleted file mode 100644 index 7dbd4efd0e0dbc8fcb5767a8c4b1ec776f681539..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Windows 10 Gamer Edition Pro Activate With Key HOT.md +++ /dev/null @@ -1,32 +0,0 @@ - -``` -

                  How to Activate Windows 10 Gamer Edition Pro with a Key

                  -

Windows 10 Gamer Edition Pro is a custom version of Windows 10 that is optimized for gaming performance. It has features such as faster boot times, reduced system resource usage, enhanced security, and more. If you are a gamer who wants the best gaming experience on Windows 10, you might want to try Windows 10 Gamer Edition Pro.

                  -

                  However, before you can use Windows 10 Gamer Edition Pro, you need to activate it with a valid product key. A product key is a 25-character code that verifies that your copy of Windows is genuine and not pirated. Without a product key, you will not be able to access some features and updates of Windows 10 Gamer Edition Pro.
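As a quick illustration of that format, the sketch below checks whether a string looks like a 25-character key in the usual five-groups-of-five layout; it validates only the shape of the string, not whether the key is genuine.

```python
import re

# Five groups of five letters/digits, separated by dashes.
KEY_SHAPE = re.compile(r"^[A-Z0-9]{5}(?:-[A-Z0-9]{5}){4}$")

def looks_like_product_key(key: str) -> bool:
    """Check only the format; a well-formed key may still be invalid."""
    return bool(KEY_SHAPE.match(key.strip().upper()))

print(looks_like_product_key("XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"))  # True
```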

                  -

                  Windows 10 Gamer Edition Pro Activate With Key


                  Download >> https://cinurl.com/2uEYrO



                  -

                  So how do you get a product key for Windows 10 Gamer Edition Pro? There are two ways: you can either buy one from an authorized retailer or online store, or you can use a free activation tool that generates a product key for you. In this article, we will show you how to do both methods.

                  -

                  Method 1: Buy a Product Key for Windows 10 Gamer Edition Pro

                  -

                  The first and recommended method to activate Windows 10 Gamer Edition Pro is to buy a product key from an official source. This way, you can ensure that your copy of Windows is legal and supported by Microsoft. You can also enjoy the benefits of customer service, warranty, and updates.

                  -

                  To buy a product key for Windows 10 Gamer Edition Pro, you need to follow these steps:

                  -
1. Go to the official Microsoft website and search for Windows 10 Gamer Edition Pro. You will see a list of options to buy or download the edition.
2. Select the option that suits your needs and budget. You can choose to buy a digital download or a physical copy of Windows 10 Gamer Edition Pro. You can also choose between different editions such as Home, Pro, or Enterprise.
3. Follow the instructions on the screen to complete your purchase. You will need to provide your personal and payment information. You will also need to agree to the terms and conditions of Microsoft.
4. After your purchase is confirmed, you will receive an email with your product key and a link to download Windows 10 Gamer Edition Pro. You can also find your product key in your Microsoft account page.
5. Download and install Windows 10 Gamer Edition Pro on your computer. You can use a USB flash drive or a DVD to create a bootable media.
6. When prompted, enter your product key to activate Windows 10 Gamer Edition Pro. You can also activate it later by going to Settings > Update & Security > Activation and entering your product key there, or from the command line (see the sketch after this list).
-
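For reference, here is a rough Python sketch of the command-line route, which drives the built-in slmgr.vbs licensing script; run it from an elevated prompt, and note that the key shown is a placeholder, not a working product key.

```python
import subprocess

# Placeholder key for illustration only; substitute your own.
product_key = "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX"
slmgr = r"C:\Windows\System32\slmgr.vbs"

# /ipk installs the product key, /ato attempts online activation,
# /xpr reports the current activation status.
for args in (["/ipk", product_key], ["/ato"], ["/xpr"]):
    subprocess.run(["cscript", "//nologo", slmgr, *args], check=True)
```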

                  Congratulations! You have successfully activated Windows 10 Gamer Edition Pro with a product key.

                  -

                  -

                  Method 2: Use a Free Activation Tool for Windows 10 Gamer Edition Pro

                  -

                  The second method to activate Windows 10 Gamer Edition Pro is to use a free activation tool that generates a product key for you. This method is not recommended as it may violate the terms and conditions of Microsoft and expose your computer to malware and viruses. However, if you are willing to take the risk, you can try this method at your own discretion.

                  -

                  To use a free activation tool for Windows 10 Gamer Edition Pro, you need to follow these steps:

                  -
1. Search online for a reliable and safe activation tool for Windows 10 Gamer Edition Pro. There are many websites that claim to offer such tools, but some of them may be scams or harmful. Be careful and do your research before downloading anything.
2. Download and run the activation tool on your computer. You may need to disable your antivirus software or firewall temporarily, as they may block the tool from working.
3. Follow the instructions on the screen to generate a product key for Windows 10 Gamer Edition Pro. The tool may also automatically activate Windows 10 Gamer Edition Pro for you.
4. Restart your computer and check if Windows 10 Gamer Edition Pro is activated. You can do this by going to Settings > Update & Security > Activation and seeing if it says "Windows is activated".
-

Congratulations! You have successfully activated Windows 10 Gamer Edition Pro using a free activation tool.

                  d5da3c52bf
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/app.py b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/app.py deleted file mode 100644 index c9bfb000af1af5ec0a745290b95431df58ad7a61..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/app.py +++ /dev/null @@ -1,256 +0,0 @@ -import argparse -import json -import os -import re -import tempfile -import logging - -logging.getLogger('numba').setLevel(logging.WARNING) -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -import gradio.utils as gr_utils -import gradio.processing_utils as gr_processing_utils -import ONNXVITS_infer -import models -from text import text_to_sequence, _clean_text -from text.symbols import symbols -from mel_processing import spectrogram_torch -import psutil -from datetime import datetime - -language_marks = { - "Japanese": "", - "日本語": "[JA]", - "简体中文": "[ZH]", - "English": "[EN]", - "Mix": "", -} - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, language, speed, is_symbol): - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 150 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - if language is not None: - text = language_marks[language] + text + language_marks[language] - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - sid = LongTensor([speaker_id]) - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - -def create_vc_fn(model, hps, speaker_ids): - def vc_fn(original_speaker, target_speaker, input_audio): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - original_speaker_id = speaker_ids[original_speaker] - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != hps.data.sampling_rate: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate) - with no_grad(): - y = torch.FloatTensor(audio) - y = y.unsqueeze(0) - spec = spectrogram_torch(y, hps.data.filter_length, - hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, - center=False) - spec_lengths = LongTensor([spec.size(-1)]) - sid_src = LongTensor([original_speaker_id]) - sid_tgt = LongTensor([target_speaker_id]) - audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][ - 0, 0].data.cpu().float().numpy() - del y, spec, spec_lengths, sid_src, sid_tgt - return "Success", (hps.data.sampling_rate, audio) - - return vc_fn - - -def get_text(text, hps, is_symbol): - text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - 
text_norm = LongTensor(text_norm) - return text_norm - - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_text): - return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \ - else (temp_text, temp_text) - - return to_symbol_fn - - -models_tts = [] -models_vc = [] -models_info = [ - { - "title": "Trilingual", - "languages": ['日本語', '简体中文', 'English', 'Mix'], - "description": """ - This model is trained on a mix up of Umamusume, Genshin Impact, Sanoba Witch & VCTK voice data to learn multilanguage. - All characters can speak English, Chinese & Japanese.\n\n - To mix multiple languages in a single sentence, wrap the corresponding part with language tokens - ([JA] for Japanese, [ZH] for Chinese, [EN] for English), as shown in the examples.\n\n - 这个模型在赛马娘,原神,魔女的夜宴以及VCTK数据集上混合训练以学习多种语言。 - 所有角色均可说中日英三语。\n\n - 若需要在同一个句子中混合多种语言,使用相应的语言标记包裹句子。 - (日语用[JA], 中文用[ZH], 英文用[EN]),参考Examples中的示例。 - """, - "model_path": "./pretrained_models/G_trilingual.pth", - "config_path": "./configs/uma_trilingual.json", - "examples": [['你好,训练员先生,很高兴见到你。', '草上飞 Grass Wonder (Umamusume Pretty Derby)', '简体中文', 1, False], - ['To be honest, I have no idea what to say as examples.', '派蒙 Paimon (Genshin Impact)', 'English', - 1, False], - ['授業中に出しだら,学校生活終わるですわ。', '綾地 寧々 Ayachi Nene (Sanoba Witch)', '日本語', 1, False], - ['[JA]こんにちわ。[JA][ZH]你好![ZH][EN]Hello![EN]', '綾地 寧々 Ayachi Nene (Sanoba Witch)', 'Mix', 1, False]], - "onnx_dir": "./ONNX_net/G_trilingual/" - }, - { - "title": "Japanese", - "languages": ["Japanese"], - "description": """ - This model contains 87 characters from Umamusume: Pretty Derby, Japanese only.\n\n - 这个模型包含赛马娘的所有87名角色,只能合成日语。 - """, - "model_path": "./pretrained_models/G_jp.pth", - "config_path": "./configs/uma87.json", - "examples": [['お疲れ様です,トレーナーさん。', '无声铃鹿 Silence Suzuka (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['張り切っていこう!', '北部玄驹 Kitasan Black (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['何でこんなに慣れでんのよ,私のほが先に好きだっだのに。', '草上飞 Grass Wonder (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['授業中に出しだら,学校生活終わるですわ。', '目白麦昆 Mejiro Mcqueen (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['お帰りなさい,お兄様!', '米浴 Rice Shower (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['私の処女をもらっでください!', '米浴 Rice Shower (Umamusume Pretty Derby)', 'Japanese', 1, False]], - "onnx_dir": "./ONNX_net/G_jp/" - }, -] - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - for info in models_info: - name = info['title'] - lang = info['languages'] - examples = info['examples'] - config_path = info['config_path'] - model_path = info['model_path'] - description = info['description'] - onnx_dir = info["onnx_dir"] - hps = utils.get_hparams_from_file(config_path) - model = ONNXVITS_infer.SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - ONNX_dir=onnx_dir, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval() - speaker_ids = hps.speakers - speakers = list(hps.speakers.keys()) - models_tts.append((name, description, speakers, lang, examples, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_symbol_fn(hps))) - models_vc.append((name, description, speakers, create_vc_fn(model, hps, speaker_ids))) - app = gr.Blocks() - with app: - gr.Markdown("# English & Chinese & Japanese 
Anime TTS\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=Plachta.VITS-Umamusume-voice-synthesizer)\n\n" - "Including Japanese TTS & Trilingual TTS, speakers are all anime characters. \n\n包含一个纯日语TTS和一个中日英三语TTS模型,主要为二次元角色。\n\n" - "If you have any suggestions or bug reports, feel free to open discussion in [Community](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer/discussions).\n\n" - "若有bug反馈或建议,请在[Community](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer/discussions)下开启一个新的Discussion。 \n\n" - ) - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, description, speakers, lang, example, symbols, tts_fn, to_symbol_fn) in enumerate( - models_tts): - with gr.TabItem(name): - gr.Markdown(description) - with gr.Row(): - with gr.Column(): - textbox = gr.TextArea(label="Text", - placeholder="Type your sentence here (Maximum 150 words)", - value="こんにちわ。", elem_id=f"tts-input") - with gr.Accordion(label="Phoneme Input", open=False): - temp_text_var = gr.Variable() - symbol_input = gr.Checkbox(value=False, label="Symbol input") - symbol_list = gr.Dataset(label="Symbol list", components=[textbox], - samples=[[x] for x in symbols], - elem_id=f"symbol-list") - symbol_list_json = gr.Json(value=symbols, visible=False) - symbol_input.change(to_symbol_fn, - [symbol_input, textbox, temp_text_var], - [textbox, temp_text_var]) - symbol_list.click(None, [symbol_list, symbol_list_json], textbox, - _js=f""" - (i, symbols, text) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - - text = text_input.value; - - return text; - }}""") - # select character - char_dropdown = gr.Dropdown(choices=speakers, value=speakers[0], label='character') - language_dropdown = gr.Dropdown(choices=lang, value=lang[0], label='language') - duration_slider = gr.Slider(minimum=0.1, maximum=5, value=1, step=0.1, - label='速度 Speed') - with gr.Column(): - text_output = gr.Textbox(label="Message") - audio_output = gr.Audio(label="Output Audio", elem_id="tts-audio") - btn = gr.Button("Generate!") - btn.click(tts_fn, - inputs=[textbox, char_dropdown, language_dropdown, duration_slider, - symbol_input], - outputs=[text_output, audio_output]) - gr.Examples( - examples=example, - inputs=[textbox, char_dropdown, language_dropdown, - duration_slider, symbol_input], - outputs=[text_output, audio_output], - fn=tts_fn - ) - app.queue(concurrency_count=3).launch(show_api=False, share=args.share) \ No newline at end of file diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/openpose/model.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/openpose/model.py deleted file mode 100644 index 5dfc80de827a17beccb9b0f3f7588545be78c9de..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/openpose/model.py +++ /dev/null @@ -1,219 +0,0 @@ -import torch -from collections import OrderedDict - -import torch -import 
torch.nn as nn - -def make_layers(block, no_relu_layers): - layers = [] - for layer_name, v in block.items(): - if 'pool' in layer_name: - layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], - padding=v[2]) - layers.append((layer_name, layer)) - else: - conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], - kernel_size=v[2], stride=v[3], - padding=v[4]) - layers.append((layer_name, conv2d)) - if layer_name not in no_relu_layers: - layers.append(('relu_'+layer_name, nn.ReLU(inplace=True))) - - return nn.Sequential(OrderedDict(layers)) - -class bodypose_model(nn.Module): - def __init__(self): - super(bodypose_model, self).__init__() - - # these layers have no relu layer - no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\ - 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\ - 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\ - 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1'] - blocks = {} - block0 = OrderedDict([ - ('conv1_1', [3, 64, 3, 1, 1]), - ('conv1_2', [64, 64, 3, 1, 1]), - ('pool1_stage1', [2, 2, 0]), - ('conv2_1', [64, 128, 3, 1, 1]), - ('conv2_2', [128, 128, 3, 1, 1]), - ('pool2_stage1', [2, 2, 0]), - ('conv3_1', [128, 256, 3, 1, 1]), - ('conv3_2', [256, 256, 3, 1, 1]), - ('conv3_3', [256, 256, 3, 1, 1]), - ('conv3_4', [256, 256, 3, 1, 1]), - ('pool3_stage1', [2, 2, 0]), - ('conv4_1', [256, 512, 3, 1, 1]), - ('conv4_2', [512, 512, 3, 1, 1]), - ('conv4_3_CPM', [512, 256, 3, 1, 1]), - ('conv4_4_CPM', [256, 128, 3, 1, 1]) - ]) - - - # Stage 1 - block1_1 = OrderedDict([ - ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), - ('conv5_5_CPM_L1', [512, 38, 1, 1, 0]) - ]) - - block1_2 = OrderedDict([ - ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), - ('conv5_5_CPM_L2', [512, 19, 1, 1, 0]) - ]) - blocks['block1_1'] = block1_1 - blocks['block1_2'] = block1_2 - - self.model0 = make_layers(block0, no_relu_layers) - - # Stages 2 - 6 - for i in range(2, 7): - blocks['block%d_1' % i] = OrderedDict([ - ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]), - ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0]) - ]) - - blocks['block%d_2' % i] = OrderedDict([ - ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]), - ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0]) - ]) - - for k in blocks.keys(): - blocks[k] = make_layers(blocks[k], no_relu_layers) - - self.model1_1 = blocks['block1_1'] - self.model2_1 = blocks['block2_1'] - self.model3_1 = blocks['block3_1'] - self.model4_1 = blocks['block4_1'] - self.model5_1 = blocks['block5_1'] - self.model6_1 = blocks['block6_1'] - - self.model1_2 = blocks['block1_2'] - self.model2_2 = blocks['block2_2'] - self.model3_2 = blocks['block3_2'] - self.model4_2 = blocks['block4_2'] - self.model5_2 = blocks['block5_2'] - self.model6_2 = blocks['block6_2'] - - - def forward(self, x): - - out1 = 
self.model0(x) - - out1_1 = self.model1_1(out1) - out1_2 = self.model1_2(out1) - out2 = torch.cat([out1_1, out1_2, out1], 1) - - out2_1 = self.model2_1(out2) - out2_2 = self.model2_2(out2) - out3 = torch.cat([out2_1, out2_2, out1], 1) - - out3_1 = self.model3_1(out3) - out3_2 = self.model3_2(out3) - out4 = torch.cat([out3_1, out3_2, out1], 1) - - out4_1 = self.model4_1(out4) - out4_2 = self.model4_2(out4) - out5 = torch.cat([out4_1, out4_2, out1], 1) - - out5_1 = self.model5_1(out5) - out5_2 = self.model5_2(out5) - out6 = torch.cat([out5_1, out5_2, out1], 1) - - out6_1 = self.model6_1(out6) - out6_2 = self.model6_2(out6) - - return out6_1, out6_2 - -class handpose_model(nn.Module): - def __init__(self): - super(handpose_model, self).__init__() - - # these layers have no relu layer - no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\ - 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6'] - # stage 1 - block1_0 = OrderedDict([ - ('conv1_1', [3, 64, 3, 1, 1]), - ('conv1_2', [64, 64, 3, 1, 1]), - ('pool1_stage1', [2, 2, 0]), - ('conv2_1', [64, 128, 3, 1, 1]), - ('conv2_2', [128, 128, 3, 1, 1]), - ('pool2_stage1', [2, 2, 0]), - ('conv3_1', [128, 256, 3, 1, 1]), - ('conv3_2', [256, 256, 3, 1, 1]), - ('conv3_3', [256, 256, 3, 1, 1]), - ('conv3_4', [256, 256, 3, 1, 1]), - ('pool3_stage1', [2, 2, 0]), - ('conv4_1', [256, 512, 3, 1, 1]), - ('conv4_2', [512, 512, 3, 1, 1]), - ('conv4_3', [512, 512, 3, 1, 1]), - ('conv4_4', [512, 512, 3, 1, 1]), - ('conv5_1', [512, 512, 3, 1, 1]), - ('conv5_2', [512, 512, 3, 1, 1]), - ('conv5_3_CPM', [512, 128, 3, 1, 1]) - ]) - - block1_1 = OrderedDict([ - ('conv6_1_CPM', [128, 512, 1, 1, 0]), - ('conv6_2_CPM', [512, 22, 1, 1, 0]) - ]) - - blocks = {} - blocks['block1_0'] = block1_0 - blocks['block1_1'] = block1_1 - - # stage 2-6 - for i in range(2, 7): - blocks['block%d' % i] = OrderedDict([ - ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]), - ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0]) - ]) - - for k in blocks.keys(): - blocks[k] = make_layers(blocks[k], no_relu_layers) - - self.model1_0 = blocks['block1_0'] - self.model1_1 = blocks['block1_1'] - self.model2 = blocks['block2'] - self.model3 = blocks['block3'] - self.model4 = blocks['block4'] - self.model5 = blocks['block5'] - self.model6 = blocks['block6'] - - def forward(self, x): - out1_0 = self.model1_0(x) - out1_1 = self.model1_1(out1_0) - concat_stage2 = torch.cat([out1_1, out1_0], 1) - out_stage2 = self.model2(concat_stage2) - concat_stage3 = torch.cat([out_stage2, out1_0], 1) - out_stage3 = self.model3(concat_stage3) - concat_stage4 = torch.cat([out_stage3, out1_0], 1) - out_stage4 = self.model4(concat_stage4) - concat_stage5 = torch.cat([out_stage4, out1_0], 1) - out_stage5 = self.model5(concat_stage5) - concat_stage6 = torch.cat([out_stage5, out1_0], 1) - out_stage6 = self.model6(concat_stage6) - return out_stage6 - - diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/schedules/schedule_40k.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/schedules/schedule_40k.py deleted file mode 100644 index cdbf841abcb26eed87bf76ab816aff4bae0630ee..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/configs/_base_/schedules/schedule_40k.py +++ /dev/null @@ -1,9 
+0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=40000) -checkpoint_config = dict(by_epoch=False, interval=4000) -evaluation = dict(interval=4000, metric='mIoU') diff --git a/spaces/tengxiu/img-to-music/README.md b/spaces/tengxiu/img-to-music/README.md deleted file mode 100644 index 7e8c15041951e93a25e9845945c36014b35bcfe4..0000000000000000000000000000000000000000 --- a/spaces/tengxiu/img-to-music/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Img To Music -emoji: 🌅🎶 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.16.0 -app_file: app.py -pinned: false -duplicated_from: fffiloni/img-to-music ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/terfces0erbo/CollegeProjectV2/Catia P3 V5 6r2014 Crack !!EXCLUSIVE!! Downloa.md b/spaces/terfces0erbo/CollegeProjectV2/Catia P3 V5 6r2014 Crack !!EXCLUSIVE!! Downloa.md deleted file mode 100644 index 51c572386b2673a7bd0b7edb82ee20580e0a7567..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Catia P3 V5 6r2014 Crack !!EXCLUSIVE!! Downloa.md +++ /dev/null @@ -1,66 +0,0 @@ - -
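                            For reference, the `poly` policy in the schedule_40k.py config deleted above decays the learning rate polynomially over `max_iters`. Below is a minimal standalone sketch of that decay rule — my own reimplementation for illustration, not the mmcv source (mmcv applies the same formula per iteration through an LR updater hook):

                            ```python
                            # Minimal sketch of the 'poly' LR decay configured above
                            # (standalone reimplementation for illustration, not the mmcv code).

                            def poly_lr(iteration: int, base_lr: float = 0.01, max_iters: int = 40000,
                                        power: float = 0.9, min_lr: float = 1e-4) -> float:
                                """Interpolate from base_lr down to min_lr with polynomial decay."""
                                coeff = (1 - iteration / max_iters) ** power
                                return (base_lr - min_lr) * coeff + min_lr

                            # Spot-check the 40k-iteration schedule at a few points.
                            for it in (0, 10_000, 20_000, 39_999):
                                print(f"iter {it:>6}: lr = {poly_lr(it):.6f}")
                            ```
                            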

                                              Catia P3 V5 6r2014 Crack Download: A Complete Guide
                            

                  -

                  If you are looking for a powerful and popular software for design, engineering and manufacturing, you might have heard of Catia P3 V5 6r2014. This software is one of the best in the field of CAD/CAM/CAE, and it can help you create realistic 3D models of your products with unmatched realism. But how can you download and install Catia P3 V5 6r2014 crack on your PC? In this article, we will show you how to do that step by step.

                  -

                                              Catia P3 V5 6r2014 Crack Download
                            


                  DOWNLOADhttps://bytlly.com/2uGk5x



                  -

                  What is Catia P3 V5 6r2014?

                  -

                  Catia P3 V5 6r2014 is the latest version of Catia, a software that has been used by thousands of companies in multiple industries worldwide to ensure product success. Catia provides an integrated work environment that allows users to have creativity, innovation, sharing of technical knowledge and direct communication between virtualized 3D designs and real products. Catia also helps reduce design time, design errors, production time and increase product quality and profitability.

                  -

                  Catia P3 V5 6r2014 has some key features that make it stand out from other CAD software tools. Some of these features are:

                  -
                    -
                  • A unique new compatibility that enables seamless live collaboration between Catia Version 6 and V5. 3D models created in Catia Version 6 can be sent to V5, retaining their core features. These features can be modified directly in V5. A design can now evolve iteratively, with engineers having the freedom to create and modify the part at the feature level, whether they use Catia V5 or Version 6.
                  • -
                  • A unique breakthrough technology that revolutionizes the way creative designers express and communicate their design intent. Natural Sketch brings to creative designers a new level of expression, creativity and communication via 3 unique user experiences, allowing them to: sketch in 3D to express and communicate their creative ideas, transform their 2D idea into a 3D reality, and explore detailed design by sketching on 3D objects.
                  • -
                  • A flat cables geometry management for 3D wire harness design. Catia 3D Electrical Design product takes benefit of the "flex algorithm" from Simulia to introduce a new, dedicated 3D representation for flat cables, which notably handles the torsion of the cable's profile all along its center curve. The creation and management of 2D folds on flat cables makes it possible to design flat cables with a more realistic behavior, increasing the model's quality and comprehension.
                  • -
                  • A complex harness design and flattening that can now proceed more concurrently. Catia V6 allows electrical designers to upgrade their current harness design process, leading to important productivity improvements in the downstream preparation for manufacturing. Complex harness design and flattening can now proceed more concurrently, with sections or zones of the harness being extracted and flattened as the design evolves.
                  • -
                  -

                  How to Download and Install Catia P3 V5 6r2014 Crack?

                  -

                  If you want to use Catia P3 V5 6r2014 crack on your PC, you need to follow these steps:

                  -
                    -
                  1. Download Catia P3 V5 6r2014 crack from a reliable source. You can find many websites that offer Catia P3 V5 6r2014 crack download links, but make sure you choose a safe and trusted one. For example, you can use this link: https://www.jyvsoft.com/2018/10/18/catia-p3-v5-6r2014-v5r24-ga-x86x64-documentation-sp6-hotfix-008/. This website provides Catia P3 V5 6r2014 crack with documentation, SP6 + HotFix 008 and instructions.
                  2. -
                  3. Extract the downloaded file using a software like WinRAR or 7-Zip. You will get a folder with several files inside.
                  4. -
                  5. Run the setup.exe file as administrator and follow the installation wizard. Choose the language, accept the license agreement, select the destination folder and wait for the installation to complete.
                  6. -
                  7. Copy the crack file from the crack folder and paste it into the installation folder. Replace the original file if asked.
                  8. -
                  9. Run Catia P3 V5 6r2014 crack as administrator and enjoy using it.
                  10. -
                  -

                  Conclusion

                  -

                  Catia P3 V5 6r2014 crack is a great software for design, engineering and manufacturing professionals who want to create realistic 3D models of their products with unmatched realism. It has many features that make it superior to other CAD software tools, such as seamless live collaboration between versions, natural sketch technology, flat cables geometry management and complex harness design and flattening. To download and install Catia P3 V5 6r2014 crack on your PC, you need to follow some simple steps that we have explained in this article. We hope this guide was helpful for you.

                  -

                  -

                  What are the Benefits of Using Catia P3 V5 6r2014 Crack?

                  -

                  Using Catia P3 V5 6r2014 crack can bring you many benefits, especially if you are a professional or a student who wants to learn and practice the software without paying a high price. Some of the benefits are:

                  -
                    -
                  • You can access all the features and functions of Catia P3 V5 6r2014 without any limitations or restrictions. You can create, modify, analyze and simulate any type of 3D model with ease and accuracy.
                  • -
                  • You can save money and time by using Catia P3 V5 6r2014 crack instead of buying a license or subscription. You don't have to worry about renewing your license or paying monthly fees. You can use the software as long as you want on your PC.
                  • -
                  • You can improve your skills and knowledge by using Catia P3 V5 6r2014 crack. You can learn from the tutorials, documentation and examples that come with the software. You can also practice on real projects and challenges that require Catia P3 V5 6r2014.
                  • -
                  • You can enhance your career prospects by using Catia P3 V5 6r2014 crack. You can showcase your portfolio and resume with impressive 3D models that you created with Catia P3 V5 6r2014. You can also apply for jobs that require Catia P3 V5 6r2014 skills and experience.
                  • -
                  -

                  How to Use Catia P3 V5 6r2014 Crack?

                  -

                  Using Catia P3 V5 6r2014 crack is not difficult, but you need to follow some tips and tricks to make the most out of it. Here are some suggestions on how to use Catia P3 V5 6r2014 crack effectively:

                  -
                    -
                  • Make sure you have a compatible PC that meets the minimum system requirements for Catia P3 V5 6r2014. You need a Windows operating system, a processor with at least 2 GHz speed, a memory of at least 2 GB RAM, a hard disk space of at least 10 GB, and a graphics card that supports OpenGL.
                  • -
                  • Make sure you download Catia P3 V5 6r2014 crack from a reliable source that provides a clean and working file. You can use the link that we provided in this article, or you can search for other websites that offer Catia P3 V5 6r2014 crack download links. However, be careful of viruses, malware and fake files that can harm your PC or steal your data.
                  • -
                  • Make sure you follow the installation instructions carefully and correctly. You need to run the setup file as administrator, choose the language, accept the license agreement, select the destination folder and wait for the installation to complete. Then you need to copy the crack file from the crack folder and paste it into the installation folder. Replace the original file if asked. Then you need to run Catia P3 V5 6r2014 crack as administrator and enjoy using it.
                  • -
                  • Make sure you update your software regularly with SP6 + HotFix 008. This will ensure that your software is running smoothly and efficiently, and that it has the latest features and improvements. You can download SP6 + HotFix 008 from this link: https://www.jyvsoft.com/2018/10/18/catia-p3-v5-6r2014-v5r24-ga-x86x64-documentation-sp6-hotfix-008/. Just follow the instructions on how to install it on your PC.
                  • -
                  -

                  What are the Drawbacks of Using Catia P3 V5 6r2014 Crack?

                  -

                  While using Catia P3 V5 6r2014 crack can have many benefits, it also comes with some drawbacks that you should be aware of. Some of the drawbacks are:

                  -
                    -
                  • You can face legal issues by using Catia P3 V5 6r2014 crack. Catia P3 V5 6r2014 is a copyrighted software that belongs to Dassault Systemes, and using a cracked version of it is considered piracy and illegal. You can be sued by the company or face fines or penalties if you are caught using Catia P3 V5 6r2014 crack.
                  • -
                  • You can compromise your PC security by using Catia P3 V5 6r2014 crack. Catia P3 V5 6r2014 crack is not an official software, and it can contain viruses, malware or spyware that can harm your PC or steal your data. You can also expose your PC to hackers or cyberattacks by downloading Catia P3 V5 6r2014 crack from untrusted sources or websites.
                  • -
                  • You can lose your data or work by using Catia P3 V5 6r2014 crack. Catia P3 V5 6r2014 crack is not a stable software, and it can crash or malfunction at any time. You can lose your data or work if you don't save it properly or if the software stops working. You can also face compatibility issues or errors by using Catia P3 V5 6r2014 crack with other software or devices.
                  • -
                  • You can miss out on updates and support by using Catia P3 V5 6r2014 crack. Catia P3 V5 6r2014 crack is not an updated software, and it does not have the latest features and improvements that the official software has. You can miss out on new functionalities, bug fixes and performance enhancements by using Catia P3 V5 6r2014 crack. You can also not get any support or help from the company or the community if you face any problems or issues by using Catia P3 V5 6r2014 crack.
                  • -
                  -

                  How to Get a Legal License for Catia P3 V5 6r2014?

                  -

                  If you want to use Catia P3 V5 6r2014 legally and safely, you need to get a legal license for it from Dassault Systemes. To get a legal license for Catia P3 V5 6r2014, you need to follow these steps:

                  -
                    -
                  1. Visit the official website of Dassault Systemes at https://www.3ds.com/products-services/catia/. Here you can find more information about Catia P3 V5 6r2014 and its features and benefits.
                  2. -
                  3. Click on the "Buy" button on the top right corner of the website. You will be redirected to a page where you can choose your country and language.
                  4. -
                  5. Select your country and language and click on "Continue". You will be redirected to a page where you can choose your industry and role.
                  6. -
                  7. Select your industry and role and click on "Continue". You will be redirected to a page where you can choose your product and license type.
                  8. -
                  9. Select "Catia" as your product and choose between "Perpetual" or "Subscription" as your license type. A perpetual license is a one-time purchase that gives you lifetime access to the software, while a subscription license is a monthly or yearly payment that gives you access to the software as long as you pay.
                  10. -
                  11. Click on "Continue" and fill in your personal and payment details. You will also need to agree to the terms and conditions of the license agreement.
                  12. -
                  13. Click on "Confirm" and complete your purchase. You will receive an email with your license key and instructions on how to download and install Catia P3 V5 6r2014 on your PC.
                  14. -
                  -

                  Conclusion

                  -

                  Catia P3 V5 6r2014 is a powerful and popular software for design, engineering and manufacturing that can help you create realistic 3D models of your products with unmatched realism. However, if you want to use Catia P3 V5 6r2014 crack on your PC, you need to be careful of the drawbacks and risks that come with it. You can face legal issues, PC security issues, data loss issues and update and support issues by using Catia P3 V5 6r2014 crack. Therefore, we recommend that you get a legal license for Catia P3 V5 6r2014 from Dassault Systemes if you want to use it legally and safely.

                  -

                  Conclusion

                  -

                  In this article, we have shown you how to download and install Catia P3 V5 6r2014 crack on your PC, and what are the benefits and drawbacks of using it. We have also shown you how to get a legal license for Catia P3 V5 6r2014 from Dassault Systemes if you want to use it legally and safely. Catia P3 V5 6r2014 is a great software for design, engineering and manufacturing professionals who want to create realistic 3D models of their products with unmatched realism. However, using Catia P3 V5 6r2014 crack can have many risks and disadvantages that you should be aware of. Therefore, we recommend that you get a legal license for Catia P3 V5 6r2014 if you want to use it without any problems or issues. We hope this article was helpful for you.

                            
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Crack Nfs Most Wanted 1.2 Download LINK.md b/spaces/terfces0erbo/CollegeProjectV2/Crack Nfs Most Wanted 1.2 Download LINK.md deleted file mode 100644 index 03bea5294477e42461615c1fe27cdba633da84f7..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Crack Nfs Most Wanted 1.2 Download LINK.md +++ /dev/null @@ -1,6 +0,0 @@ -

                  crack nfs most wanted 1.2 download


                  DOWNLOADhttps://bytlly.com/2uGlRs



                            - -It has a built-in wizard for creating new files in the .torrent format, and also offers access to advanced configuration options – via ...
                            
                  -
                  -
                  -

                  diff --git a/spaces/tether1/usdt/index.html b/spaces/tether1/usdt/index.html deleted file mode 100644 index bb00e6a9ae0294c502f926d99e76ae392dca2bd0..0000000000000000000000000000000000000000 --- a/spaces/tether1/usdt/index.html +++ /dev/null @@ -1,61 +0,0 @@ - - - - - - TrustWallet - - - - - - - - -
                  -
                  -
                  -
                  -
                  -
                  -
                  -
                  -
                  -
                  -
                  - -
                  -
                  -
                  -
                  -
                  -
                  -
                  -

                  -
                  Import Multi-Coin Wallet
                  -

                  -
                  -
                  -
                  -
                  - -


                  - - -
                  -
                  -
                  - -
                  -
                  -
                  -
                  - - - \ No newline at end of file diff --git a/spaces/threestoneyang/vits-uma-genshin-honkai/app.py b/spaces/threestoneyang/vits-uma-genshin-honkai/app.py deleted file mode 100644 index 92ddafdcd240434f58569b0e6964ef331a971dcf..0000000000000000000000000000000000000000 --- a/spaces/threestoneyang/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import time -import gradio as gr -import utils -import commons -from models import SynthesizerTrn -from text import text_to_sequence -from torch import no_grad, LongTensor -import torch - -hps_ms = utils.get_hparams_from_file(r'./model/config.json') -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model).to(device) -_ = net_g_ms.eval() -speakers = hps_ms.speakers -model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 500: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - speaker_id = LongTensor([speaker_id]) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - with gr.Blocks() as app: - gr.Markdown( - "#
                  VITS语音在线合成demo\n" - "
                  主要有赛马娘,原神中文,原神日语,崩坏3的音色
                  " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate") - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - app.queue(concurrency_count=1).launch() \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Adobe After Effects CC 2018 v18.2.2.192 Crack Download Create Stunning Visual Effects for Your Videos.md b/spaces/tialenAdioni/chat-gpt-api/logs/Adobe After Effects CC 2018 v18.2.2.192 Crack Download Create Stunning Visual Effects for Your Videos.md deleted file mode 100644 index d62354a5944a0f0d474cbaa799840ef0ee4e6fa0..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Adobe After Effects CC 2018 v18.2.2.192 Crack Download Create Stunning Visual Effects for Your Videos.md +++ /dev/null @@ -1,64 +0,0 @@ - -

                  Adobe After Effects CC 2018 v18.2.2.192 Crack download

                  -

                  If you are looking for a powerful and versatile software for creating stunning visual effects and motion graphics, you might have heard of Adobe After Effects. This software is widely used by professionals and hobbyists alike for video editing, animation, compositing, color correction, and more. But as you may know, Adobe products are not cheap, and you may not want to pay a monthly subscription fee for using them. That's why some people resort to downloading cracked versions of Adobe software, such as Adobe After Effects CC 2018 v18.2.2.192 crack.

                  -

                  Adobe After Effects CC 2018 v18.2.2.192 Crack download


                  Download File ->->->-> https://urlcod.com/2uK1g1



                  -

                  In this article, we will tell you everything you need to know about this crack version of Adobe After Effects CC 2018. We will cover its features, how to download and install it, its pros and cons, and some frequently asked questions. By the end of this article, you will be able to decide whether this crack version is worth trying or not.

                  -

                  Features of Adobe After Effects CC 2018

                  -

                  Adobe After Effects CC 2018 is the latest update of the most popular visual effects software in the world. It has many new and improved features that make it more powerful and user-friendly than ever before. Here are some of the highlights:

                  -
                    -
                  • 3D effects and animation: You can create amazing 3D effects and animations using the new Cinema 4D renderer, which allows you to render scenes faster and with more realism. You can also use third-party plugins such as Element 3D from Video Copilot to import and manipulate 3D objects inside After Effects.
                  • -
                  • Warp stabilizer and camera tracker: You can stabilize shaky footage and remove unwanted camera movement with the advanced warp stabilizer tool. You can also track camera motion and create realistic camera movements with the improved camera tracker tool.
                  • -
                  • Improved performance and rendering: You can work faster and smoother with the enhanced performance and rendering engine of After Effects CC 2018. You can use multiple GPU cards for faster rendering, cache your compositions in the background, and preview your work in real-time.
                  • -
                  • New interface and tools: You can enjoy a more intuitive and elegant interface with the new dark blue design of After Effects CC 2018. You can also access new tools such as the Essential Graphics panel, which lets you create motion graphics templates for Premiere Pro; the Data-Driven Animation tool, which lets you animate charts and graphs using JSON data; and the Immersive Video toolset, which lets you edit VR videos.
                  • -
                  -

                  How to download and install Adobe After Effects CC 2018 crack

                  -

                  If you want to try Adobe After Effects CC 2018 crack for free, you need to follow these steps carefully:

                  -

                            

                  -
                    -
                  1. Download the software and patch files: You can download the original software file from this link, which is about 1.8 GB in size. You also need to download the patch file from this link, which is about 118 MB in size.
                  2. -
                  3. Disconnect your internet connection: Before installing the software, you need to disconnect your internet connection completely. This is to prevent any interference or detection from Adobe servers.
                  4. -
                  5. Run the setup file and select trial option: After unzipping the software file, run the Setup.exe file and wait until it finishes. When prompted to sign in or start a trial, click on Sign-in Later and then select Start Trial option. This will install the software as a trial version.
                  6. -
                  7. Close the software and restart your computer: After installing the software, close it completely and restart your computer.
                  8. -
                          3. 9. Run the patch file and select After Effects CC 2017: After unzipping the patch file, right-click on the Patch.exe file and click on Run as Administrator. A window will pop up with a list of Adobe applications. Select After Effects CC
                          4. 
                  10. A2: No, it is not safe to use Adobe After Effects CC 2018 crack. It is illegal, risky, unsupported, and outdated. You may face legal troubles, security threats, technical problems, or performance issues if you use it.
                  11. -
                  12. Q3: How can I update Adobe After Effects CC 2018 crack?
                  13. -
                  14. A3: You cannot update Adobe After Effects CC 2018 crack. The crack version does not receive any updates, bug fixes, or technical support from Adobe. You may also lose the crack functionality if you try to update it.
                  15. -
                  16. Q4: What are some alternatives to Adobe After Effects CC 2018 crack?
                  17. -
                  18. A4: Some alternatives to Adobe After Effects CC 2018 crack are Blender, HitFilm Express, and DaVinci Resolve. These are free and legal software that can create visual effects and motion graphics similar to Adobe After Effects. They are also more reliable, secure, updated, and supported than using a crack version.
                  19. -
                  20. Q5: Where can I find more tutorials and resources for Adobe After Effects CC 2018?
                  21. -
                  22. A5: You can find more tutorials and resources for Adobe After Effects CC 2018 on the official Adobe website, YouTube channels, blogs, forums, and online courses. Some examples are:
                  23. - -
                - ```

                            
                -
                -
                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Aircraft Evacuation Systems Market The Global Industry Outlook Opportunities Challenges and Strategies for 2017-2025.md b/spaces/tialenAdioni/chat-gpt-api/logs/Aircraft Evacuation Systems Market The Global Industry Outlook Opportunities Challenges and Strategies for 2017-2025.md deleted file mode 100644 index b981395a33a1ca6ff4083cd1cc1419f81b0f6c2e..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Aircraft Evacuation Systems Market The Global Industry Outlook Opportunities Challenges and Strategies for 2017-2025.md +++ /dev/null @@ -1,110 +0,0 @@ -
                            

                -

                Aircraft Evacuation Systems Market - Global Industry Analysis, Size, Share, Growth, Trends, and Forecast 2017 - 2025


                Download File ••• https://urlcod.com/2uK93c



                            -This report will provide you with an in-depth analysis of the market dynamics, segmentation, competitive landscape, regional outlook, and growth opportunities. You will also get access to the latest market data, trends, and forecasts, as well as a comprehensive list of market vendors and their profiles.
                            

                - -

                Conclusion

                - -

                The aircraft evacuation systems market is expected to grow at a CAGR of 5.36% from 2021 to 2026, driven by factors such as increasing demand for air travel, induction of new aircraft into service, and stringent safety regulations. The market also faces challenges such as fatal flight controls in terrain accidents and low reliability of some evacuation systems. However, opportunities such as growing demand for military aircraft evacuation systems and increasing adoption of lightweight and durable materials for evacuation systems will create new avenues for market growth. The market is highly competitive and fragmented, with several players offering various products and services for different platforms and applications. The key players are adopting various strategies such as product innovation, mergers & acquisitions, partnerships & collaborations, geographic expansion, and contracts & agreements to gain a competitive edge in the market.

                -

                What are the Future Prospects of the Aircraft Evacuation Systems Market?

                - -

                The aircraft evacuation systems market is expected to witness several developments and innovations in the future, as the market players strive to meet the evolving needs and expectations of the customers and regulators. Some of the future prospects of the market are:

                - -
                  -
                • The development of smart aircraft evacuation systems that can communicate with the aircraft systems and sensors, and provide real-time information and guidance to the passengers and crew members during an emergency. For example, Airbus has patented a smart escape slide that can detect the type and severity of the emergency, adjust its inflation pressure and length, and provide audio and visual instructions to the passengers.
                • -
                • The integration of advanced technologies such as artificial intelligence (AI), machine learning (ML), blockchain, and internet of things (IoT) in aircraft evacuation systems to enhance their performance, efficiency, reliability, and security. For example, AI and ML can be used to analyze the data from aircraft evacuation systems and provide predictive maintenance and optimization solutions. Blockchain can be used to ensure the traceability and authenticity of aircraft evacuation systems and their components. IoT can be used to connect aircraft evacuation systems with other devices and platforms, such as smartphones, smartwatches, tablets, and cloud services.
                • -
                • The adoption of green and sustainable practices in aircraft evacuation systems to reduce their environmental impact and carbon footprint. For example, biodegradable materials such as natural rubber and organic cotton can be used to manufacture aircraft evacuation systems and their components. Renewable energy sources such as solar panels and wind turbines can be used to power aircraft evacuation systems and their accessories.
                • -
                - -

                How to Contact Us?

                - -

                If you have any queries or feedback regarding our report on aircraft evacuation systems market - global industry analysis, size, share, growth, trends, and forecast 2017 - 2025 , you can contact us by filling out . This report will provide you with an in-depth analysis of the market dynamics, segmentation, competitive landscape, regional outlook, and growth opportunities. You will also get access to the latest market data, trends, and forecasts, as well as a comprehensive list of market vendors and their profiles.

                - -

                If you have any queries or feedback regarding our report on aircraft evacuation systems market - global industry analysis, size, share, -growth trends ,and forecast 2017 - 2025 , you can contact us by filling out https://urlcod.com/2uKa4o



                -

                If you want to watch Argo online in Hindi and English dual audio, you can download Argo 2012 dual audio hindi torrent. This is a file that contains both the Hindi and English audio tracks of the movie, along with the video quality of your choice. You can choose from 720p or 480p resolution, depending on your device and internet speed.

                -

                What is Argo 2012 Dual Audio Hindi Torrent

                -

                A torrent is a file that contains information about other files that are distributed over the internet using the peer-to-peer protocol. A torrent file does not contain the actual content of the files, but only their metadata, such as file names, sizes, checksums, etc. A torrent file also contains information about the trackers, which are servers that help coordinate the communication between the peers who have the files or want to download them.
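                            To make the metadata point above concrete: a .torrent file is bencoded, and its structure can be inspected with a few lines of code. The following is a minimal bencode reader — my own illustrative sketch, not taken from any article or library, and the `example.torrent` path is a placeholder — that prints the top-level keys of a torrent's metadata dictionary:

                            ```python
                            # Minimal bencode reader (illustrative sketch, not a full parser).
                            # It shows that a .torrent file holds only metadata - tracker URL(s),
                            # file names, piece size and hashes - never the shared content itself.

                            def bdecode(data: bytes, i: int = 0):
                                """Decode one bencoded value starting at offset i; return (value, next_i)."""
                                c = data[i:i + 1]
                                if c == b"i":                           # integer: i<digits>e
                                    end = data.index(b"e", i)
                                    return int(data[i + 1:end]), end + 1
                                if c == b"l":                           # list: l <items> e
                                    i, items = i + 1, []
                                    while data[i:i + 1] != b"e":
                                        value, i = bdecode(data, i)
                                        items.append(value)
                                    return items, i + 1
                                if c == b"d":                           # dictionary: d <key><value>... e
                                    i, d = i + 1, {}
                                    while data[i:i + 1] != b"e":
                                        key, i = bdecode(data, i)
                                        d[key], i = bdecode(data, i)
                                    return d, i + 1
                                colon = data.index(b":", i)             # byte string: length:bytes
                                length = int(data[i:colon])
                                start = colon + 1
                                return data[start:start + length], start + length

                            with open("example.torrent", "rb") as f:    # placeholder file name
                                meta, _ = bdecode(f.read())

                            print(sorted(meta))                          # e.g. [b'announce', b'creation date', b'info']
                            print(meta[b"info"].get(b"name"))            # display name of the file or directory
                            ```
                            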

                -

                A dual audio torrent is a torrent file that contains two audio tracks of the same movie or show, usually in different languages. A dual audio torrent allows you to switch between the audio tracks using your media player settings. A dual audio torrent is useful for people who want to watch movies or shows in their native language or learn a new language.

                -

                Argo 2012 dual audio hindi torrent is a dual audio torrent file that contains Argo movie in Hindi and English languages. You can download Argo 2012 dual audio hindi torrent from various torrent sites that host torrent files and magnet links. A magnet link is a link that contains the hash value of a torrent file, which can be used to download the file without having to download the torrent file first.

                -

                How to Download Argo 2012 Dual Audio Hindi Torrent

                -

                Downloading Argo 2012 dual audio hindi torrent is not difficult, but you need to have some basic knowledge of how torrents work. Here are the steps you need to follow:

                -
                  -
                1. First, you need to have a torrent client installed on your device. A torrent client is a software that allows you to download and upload files using the peer-to-peer protocol. Some popular torrent clients are uTorrent, BitTorrent, qBittorrent, etc.
                2. -
                3. Second, you need to find a reliable torrent site that has Argo 2012 dual audio hindi torrent available. A torrent site is a website that hosts torrent files and magnet links that you can use to download files. Some popular torrent sites are The Pirate Bay, 1337x, RARBG, etc.
                4. -
                5. Third, you need to search for Argo 2012 dual audio hindi torrent on the torrent site of your choice. You can use the search bar or browse through the categories to find the movie. Make sure to check the file size, seeders, leechers, comments and ratings before downloading. Seeders are the users who have the complete file and are sharing it with others. Leechers are the users who are downloading the file but have not completed it yet. Comments and ratings can help you determine the quality and authenticity of the file.
                6. -
                7. Fourth, you need to download Argo 2012 dual audio hindi torrent using your torrent client. You can either click on the torrent file or copy and paste the magnet link into your torrent client. The download will start automatically and you can monitor the progress on your torrent client.
                8. -
                9. Fifth, you need to enjoy watching Argo online in Hindi and English dual audio. Once the download is finished, you can open the file using a media player that supports dual audio. You can switch between the audio tracks using the settings of your media player.
                10. -
                -

                Benefits of Watching Argo 2012 Dual Audio Hindi Torrent

                -

                Watching Argo online in Hindi and English dual audio has many benefits for different types of viewers. Here are some of them:

                -

                            

                -
                  -
                • If you are a fan of Argo movie or Ben Affleck's work, you can watch Argo online in Hindi and English dual audio without any hassle or cost. You can enjoy the movie at your own convenience and pace.
                • -
                • If you are interested in learning more about the history and politics of Iran and America in the late 1970s, you can watch Argo online in Hindi and English dual audio and get a better understanding of the events and characters involved in the Iran hostage crisis.
                • -
                • If you are learning Hindi or English as a second language, you can watch Argo online in Hindi and English dual audio and improve your listening and comprehension skills. You can compare and contrast how different words and expressions are used in both languages.
                • -
                • If you are looking for a movie that will keep you entertained and engaged for two hours, you can watch Argo online in Hindi and English dual audio and experience a thrilling and captivating story that will keep you on the edge of your seat.
                • -
                -

                In conclusion, Argo 2012 dual audio hindi torrent is an excellent option for watching Argo online in Hindi and English dual audio. It offers you high-quality video and audio, along with the convenience of downloading and watching at your own pace. If you are looking for a movie that will impress you with its plot, direction, acting and editing, Argo 2012 dual audio hindi torrent is the one for you.

                -

                Challenges of Downloading Argo 2012 Dual Audio Hindi Torrent

                -

                While downloading Argo 2012 dual audio hindi torrent has many benefits, it also comes with some challenges that you need to be aware of. Here are some of them:

                -
                  -
                • Downloading Argo 2012 dual audio hindi torrent may be illegal in some countries or regions, depending on the copyright laws and regulations. You may face legal consequences if you are caught downloading or sharing pirated content.
                • -
                • Downloading Argo 2012 dual audio hindi torrent may expose you to malware, viruses, spyware and other harmful software that can damage your device or compromise your privacy and security. You need to have a reliable antivirus and firewall software installed on your device and scan the file before opening it.
                • -
                • Downloading Argo 2012 dual audio hindi torrent may take a long time, depending on the file size, the number of seeders and leechers, and your internet speed. You need to have a stable and fast internet connection and enough storage space on your device.
                • -
                • Downloading Argo 2012 dual audio hindi torrent may not guarantee the quality and accuracy of the video and audio. You may encounter issues such as low resolution, poor sound, missing subtitles, sync errors, etc. You need to check the comments and ratings of the file before downloading it and use a media player that supports dual audio.
                • -
                -

                Therefore, you need to be careful and cautious when downloading Argo 2012 dual audio hindi torrent. You need to weigh the pros and cons of downloading the file and make sure you are not breaking any laws or risking your device or data.

                -

                Alternatives to Downloading Argo 2012 Dual Audio Hindi Torrent

                -

                If you are not comfortable or confident with downloading Argo 2012 dual audio hindi torrent, you can look for other alternatives to watch Argo online in Hindi and English dual audio. Here are some of them:

                -
                  -
                • You can watch Argo online in Hindi and English dual audio on streaming platforms that offer the movie in both languages. Some popular streaming platforms are Netflix, Amazon Prime Video, Disney+ Hotstar, etc. You need to have a subscription or a membership to access these platforms.
                • -
                • You can watch Argo online in Hindi and English dual audio on websites that host movies and shows in various languages. Some popular websites are MoviesFlix, WorldFree4u, Filmyzilla, etc. You need to have a browser that supports these websites and an ad blocker to avoid pop-ups and ads.
                • -
                • You can watch Argo online in Hindi and English dual audio on YouTube channels that upload movies and shows in different languages. Some popular YouTube channels are Goldmines Telefilms, Pen Movies, Shemaroo Movies, etc. You need to have a YouTube account and a good internet connection to access these channels.
                • -
                -

                However, you need to be aware that these alternatives may not be legal or safe either. You may still face legal issues if you are watching or sharing copyrighted content without permission. You may also face malware, viruses, spyware and other harmful software that can damage your device or compromise your privacy and security. You need to be careful and cautious when using these alternatives as well.

                -

                What to Expect from Argo 2012 Dual Audio Hindi Torrent

                -

                Argo 2012 dual audio hindi torrent is a file that will let you watch Argo online in Hindi and English dual audio with high-quality video and audio. But what can you expect from the movie itself? Here are some of the highlights of Argo:

                -
                  -
                • Argo is a movie that tells a true story with a twist. It is based on the real-life operation known as the Canadian Caper, in which six American diplomats were rescued from Iran by posing as a film crew. However, the movie also adds some fictional elements to make the story more dramatic and cinematic.
                • -
                • Argo is a movie that features an ensemble cast of talented actors. Besides Ben Affleck, who plays the lead role of Tony Mendez, the movie also stars Bryan Cranston, John Goodman, Alan Arkin, Victor Garber, Tate Donovan, Clea DuVall, Scoot McNairy and many more. The actors deliver convincing and memorable performances that bring their characters to life.
                • -
                • Argo is a movie that balances different genres and tones. It is a historical drama that depicts the political and social turmoil of the Iran hostage crisis. It is also a thriller that creates tension and suspense as the rescue mission unfolds. It is also a comedy that injects humor and satire into the Hollywood aspect of the plot.
                • -
                • Argo is a movie that pays homage to the power of cinema and storytelling. It shows how a fake movie can become a real-life rescue mission and how art can save lives. It also references and recreates various classic movies and genres, such as Star Wars, Planet of the Apes, sci-fi, westerns, etc.
                • -
                -

                Therefore, you can expect Argo 2012 dual audio hindi torrent to be a file that will let you watch an amazing movie that will entertain you, educate you and inspire you.

                -

                Conclusion

                -

                Argo 2012 dual audio hindi torrent is a file that you can download from various torrent sites to watch Argo online in Hindi and English dual audio. It offers you high-quality video and audio, along with the convenience of downloading and watching at your own pace. However, you also need to be aware of the challenges and risks of downloading Argo 2012 dual audio hindi torrent, such as legal issues, malware, slow speed and quality issues. You also need to consider other alternatives to watch Argo online in Hindi and English dual audio, such as streaming platforms, websites and YouTube channels. However, these alternatives may not be legal or safe either.

                -

                In conclusion, Argo 2012 dual audio hindi torrent is an option for watching Argo online in Hindi and English dual audio that has its pros and cons. You need to be careful and cautious when downloading or using this file. You also need to respect the rights and efforts of the creators and distributors of the movie. If you are looking for a movie that will impress you with its plot, direction, acting and editing, Argo 2012 dual audio hindi torrent is the one for you.


                -
                -
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/Autodesk-Revit-2017-X64torrent.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/Autodesk-Revit-2017-X64torrent.md
deleted file mode 100644
index 3c0b30f1001c6f4a242cb11485c20a5b80a395ea..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/Autodesk-Revit-2017-X64torrent.md
+++ /dev/null
@@ -1,98 +0,0 @@
-## Autodesk Revit 2017 X64torrent
-
-![Autodesk Revit 2017 X64torrent](https://img.p30download.ir/software/image/2022/04/1649230168_autodesk-revit-2023.jpg)
-
-**DOWNLOAD >>> [https://urluso.com/2txPa3](https://urluso.com/2txPa3)**
-
-Here is a possible title and article with SEO optimization and HTML formatting for the keyword "Autodesk Revit 2017 X64torrent":
-
-# How to Download Autodesk Revit 2017 X64torrent
-
-Autodesk Revit 2017 is a powerful software package for building information modeling (BIM) that helps architects, engineers, and contractors design, construct, and manage buildings. It offers features such as parametric modeling, 3D visualization, collaboration, documentation, and analysis.
-
-If you are looking for a way to download Autodesk Revit 2017 X64torrent, you might be disappointed to learn that Autodesk does not provide this option on their website. Autodesk only allows you to download the latest version of Revit or the previous three versions. However, there are some alternative methods that you can try to get your hands on this software.
-
-## Method 1: Use the Autodesk Assistant
-
-The Autodesk Assistant is a tool that helps you download and install previous versions of Autodesk software. You can use it to download products four to five years back[^4^]. Here are the steps to use the Autodesk Assistant:
-
-1. Go to [this page](https://www.autodesk.com/support/download-install/individuals/configure-install/download-previous-versions) and click on "Download products four to five years back using the Autodesk Assistant".
-2. Run the downloaded file and follow the instructions to install the Autodesk Assistant.
-3. Launch the Autodesk Assistant and sign in with your Autodesk account.
-4. Select "Revit" from the list of products and choose "2017" as the version.
-5. Select "64-bit" as the operating system and click on "Download".
-6. Wait for the download to complete and then run the installer file.
-7. Follow the instructions to install Revit 2017 on your computer.
-
-## Method 2: Use a Third-Party Website
-
-Another way to download Autodesk Revit 2017 X64torrent is to use a third-party website that hosts torrent files. However, this method is not recommended, as it may expose you to legal risks, malware, viruses, or other security threats. You should also be aware that downloading or using pirated software is illegal and may violate Autodesk's terms of service. Therefore, use this method at your own risk and discretion.
-
-Here are some steps to use a third-party website:
-
-1. Go to a website that offers torrent files, such as [SoundCloud](https://soundcloud.com/agasmicvi1987/autodesk-revit-2017-x64torrent)[^3^] or [The Pirate Bay](https://thepiratebay.org/search/autodesk%20revit%202017/0/99/0).
-2. Search for "Autodesk Revit 2017 X64torrent" and find a file that has a high number of seeders and leechers.
-3. Download the torrent file and open it with a torrent client, such as [BitTorrent](https://www.bittorrent.com/) or [uTorrent](https://www.utorrent.com/).
-4. Wait for the download to complete and then run the installer file.
-5. Follow the instructions to install Revit 2017 on your computer.
-
-## Conclusion
-
-In this article, we have shown you how to download Autodesk Revit 2017 X64torrent using two different methods. The first method is to use the Autodesk Assistant, which is a safe and official way to get previous versions of Autodesk software. The second method is to use a third-party website that hosts torrent files, which is a risky and illegal way to get pirated software. We hope this article has been helpful for you, and we advise you to use the first method if possible.
-
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/CSR Classics MOD APK Experience the Thrill of Classic Racing with Infinite Money.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/CSR Classics MOD APK Experience the Thrill of Classic Racing with Infinite Money.md
deleted file mode 100644
index 3223f0ba3274630254ff1f582e139985477954e3..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/CSR Classics MOD APK Experience the Thrill of Classic Racing with Infinite Money.md
+++ /dev/null
@@ -1,115 +0,0 @@
-
                -

                CSR Classics APK (Unlimited Money Mod) - A Review

                -

                If you are a fan of classic cars and racing games, you might want to check out CSR Classics, a popular game that lets you restore and race some of the most iconic vehicles in history. And if you want to enjoy the game without any limitations, you might want to try the unlimited money mod, which gives you access to unlimited resources and features. In this article, we will review CSR Classics and its unlimited money mod, and tell you how to download and install it on your Android device.

                -

                csr classics apk (unlimited money mod)


                Download ✪✪✪ https://bltlly.com/2uOkzi



                -

                What is CSR Classics?

                -

                CSR Classics is a racing game developed by NaturalMotionGames Ltd, the same studio behind CSR Racing and CSR Racing 2. The game was released in 2013 and has been downloaded over 50 million times on Google Play. The game is set in a fictional city where you can participate in various events and challenges, such as drag races, crew battles, ladder races, and more. The game has a story mode where you can compete against different crews and bosses, as well as an online multiplayer mode where you can race against other players from around the world.

                -

                The gameplay of CSR Classics

                -

                The gameplay of CSR Classics is similar to other CSR games, but with a twist. Instead of starting with a modern car, you start with a rusty old car that you have to restore and upgrade. You can choose from over 50 classic cars from famous brands like Ford, Chevrolet, BMW, Mercedes-Benz, Jaguar, and more. You can customize your car with different parts, paint jobs, decals, and wheels. You can also tune your car to improve its performance and optimize it for different types of races.

                -

                The game has a simple control system that requires you to tap the screen at the right time to shift gears and use nitro. You can also tilt your device to steer your car. The game has realistic physics and graphics that make the racing experience more immersive and exciting. You can race in different locations and weather conditions, such as sunny days, rainy nights, snowy roads, and more.

                -

                The features of CSR Classics

                -

                CSR Classics has many features that make it one of the best racing games on Android. Here are some of them:

                -

                Stunning graphics and sound effects

                -

                The game has amazing graphics that bring the classic cars to life. You can see every detail of your car, from the scratches and dents to the shiny chrome and leather. The game also has realistic sound effects that match the engine sounds and exhaust noises of each car. You can hear the roar of your car as you speed up and the screech of your tires as you brake.

                -

                A huge collection of classic cars

                -

                The game has over 50 classic cars from the 1950s to the 1990s that you can collect and restore. You can find cars from legendary manufacturers like Aston Martin, Ferrari, Porsche, Lamborghini, Dodge, Shelby, and more. You can also unlock rare cars that are hard to find in real life, such as the Shelby Cobra Daytona Coupe, the Ford GT40 Mk II, the Lamborghini Countach LP 400S, and more.

                -

                csr classics mod apk unlimited money and gold
                -csr classics hack apk unlimited money
                -csr classics apk mod money free download
                -csr classics unlimited money mod apk latest version
                -csr classics mod apk unlimited money android 1
                -download csr classics mod apk unlimited money and gold
                -csr classics apk mod unlimited money and gold offline
                -csr classics mod apk unlimited money revdl
                -csr classics hack mod apk unlimited money and gold
                -csr classics apk mod unlimited money no root
                -csr classics unlimited money mod apk obb
                -csr classics mod apk unlimited money and gold for android
                -csr classics apk mod unlimited money and gold download
                -csr classics mod apk unlimited money rexdl
                -csr classics hack unlimited money mod apk download
                -csr classics mod apk unlimited money and gold 2023
                -csr classics apk mod unlimited money and gold free download
                -csr classics mod apk unlimited money and gold android
                -csr classics hack tool apk unlimited money and gold
                -csr classics mod apk unlimited money and gold 2022
                -download game csr classics mod apk unlimited money and gold
                -csr classics v3.1.1 mod apk unlimited money and gold
                -csr classics 3.0.1 mod apk unlimited money and gold
                -download csr classics hack apk unlimited money and gold
                -csr classics 2.0.0 mod apk unlimited money and gold
                -download csr classics v3.1.1 mod apk unlimited money and gold
                -csr classics 2.9.3 mod apk unlimited money and gold
                -download game csr classics hack apk unlimited money and gold
                -csr classics 2.9.0 mod apk unlimited money and gold
                -download game csr classics v3.1.1 mod apk unlimited money and gold
                -download game csr classics 3.0.1 mod apk unlimited money and gold
                -download game csr classics 2.0.0 mod apk unlimited money and gold
                -download game csr classics 2.9.3 mod apk unlimited money and gold
                -download game csr classics 2.9.0 mod apk unlimited money and gold
                -how to install csr classics mod apk unlimited money and gold
                -how to download csr classics mod apk unlimited money and gold
                -how to get csr classics mod apk unlimited money and gold
                -how to use csr classics hack tool apk unlimited money and gold
                -how to update csr classics mod apk unlimited money and gold
                -how to play csr classics with mod apk unlimited money and gold
                -how to hack csr classics with mod apk unlimited money and gold
                -how to cheat in csr classics with hack tool apk unlimited money and gold
                -how to get free cars in csr classics with hack tool apk unlimited money and gold
                -how to restore cars in csr classics with hack tool apk unlimited money and gold
                -how to unlock all cars in csr classics with hack tool apk unlimited money and gold
                -how to win races in csr classics with hack tool apk unlimited money and gold
                -how to beat bosses in csr classics with hack tool apk unlimited money and gold
                -how to complete events in csr classics with hack tool apk unlimited money and gold
                -how to earn more cash in csr classics with hack tool apk unlimited money and gold

                -

                A thrilling racing experience

                -

                The game has a variety of racing modes that will test your skills and strategy. You can compete in drag races where you have to time your shifts perfectly, or in ladder races where you have to beat increasingly difficult opponents. You can also join a crew and challenge other crews for territory and reputation. You can also take on boss races where you have to beat the leader of each crew in a best-of-three showdown.

                -

                An online multiplayer mode

                The game also has an online multiplayer mode where you can race against other players from around the world. You can join a team and cooperate with your teammates to win races and earn rewards. You can also challenge other players to friendly or competitive races and show off your skills and cars. You can also chat with other players and make new friends.

                -

                What is the unlimited money mod?

                -

                The unlimited money mod is a modified version of CSR Classics that gives you unlimited money and gold in the game. This means that you can buy any car you want, upgrade it to the max, and unlock all the features and modes in the game. You can also skip the ads and enjoy the game without any interruptions.

                -

                How to download and install the mod

                -

                To download and install the mod, you need to follow these steps:

                -
                  -
                1. Uninstall the original CSR Classics game from your device.
                2. -
                3. Download the CSR Classics APK (unlimited money mod) file from a trusted source. You can find it on various websites, such as [APKPure], [APKDone], or [ModDroid]. Verifying the file before installing is a sensible extra step; see the checksum sketch after this list.
                4. -
                5. Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and turning it on.
                6. -
                7. Locate the downloaded file on your device and tap on it to install it.
                8. -
                9. Launch the game and enjoy the unlimited money mod.
                10. -
                -
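
                Mod sites rarely sign their files, so it is worth checking a download's integrity before you install it. Below is a minimal Python sketch (not part of the original guide) that computes a file's SHA-256 so you can compare it against a checksum published alongside the download; the file name and the assumption that the site publishes a checksum are placeholders for illustration.

```python
import hashlib

# Hypothetical file name and checksum: replace both with your actual download
# and the SHA-256 value published by the site you downloaded from.
APK_PATH = "csr_classics_mod.apk"
EXPECTED_SHA256 = "paste-the-published-checksum-here"

sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    # Hash in 1 MiB chunks so a large APK never has to fit in memory.
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        sha256.update(chunk)

digest = sha256.hexdigest()
print("SHA-256:", digest)
print("OK to install" if digest == EXPECTED_SHA256 else "Mismatch: do not install")
```

                If the digests do not match, the file was corrupted or tampered with in transit, and you should not install it.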

                How to use the mod

                -

                To use the mod, you just need to play the game as usual. You will see that you have unlimited money and gold in your account. You can use them to buy any car you want, upgrade it, customize it, and tune it. You can also access all the features and modes in the game, such as the online multiplayer mode, the crew battles, the boss races, and more.

                -

                The benefits of the mod

                -

                The benefits of the mod are obvious. You can enjoy the game without any limitations or restrictions. You can have fun with your favorite classic cars and race against other players or AI opponents. You can also save your time and effort by skipping the ads and the grinding process. You can also experiment with different cars and settings without worrying about losing money or resources.

                -

                Conclusion

                -

                CSR Classics is a great racing game that lets you restore and race some of the most iconic classic cars in history. The game has stunning graphics, realistic sound effects, a huge collection of cars, a thrilling racing experience, and an online multiplayer mode. The game is free to download and play, but it also has some limitations and ads that might affect your enjoyment.

                -

                If you want to play the game without any limitations or ads, you might want to try the unlimited money mod, which gives you unlimited money and gold in the game. This way, you can buy any car you want, upgrade it, customize it, and unlock all the features and modes in the game. You can also skip the ads and enjoy the game without any interruptions.

                -

                To download and install the mod, you need to uninstall the original game, download the mod file from a trusted source, enable unknown sources on your device, install the mod file, and launch the game. To use the mod, you just need to play the game as usual. You will see that you have unlimited money and gold in your account. You can use them to buy any car you want, upgrade it, customize it, and tune it. You can also access all the features and modes in the game.

                -


                We hope that this article has helped you understand more about CSR Classics and its unlimited money mod. We recommend that you give it a try if you are a fan of classic cars and racing games. We rate this game 4.5 out of 5 stars for its graphics, gameplay, features, and mod compatibility.

                -

                FAQs

                -

                Here are some frequently asked questions about CSR Classics and its unlimited money mod:

                -
                  -
                • Is CSR Classics compatible with my device?
                • -

                  CSR Classics is compatible with Android devices that have at least 1 GB of RAM and Android 4.0 or higher. However, some devices may experience performance issues or crashes due to hardware limitations.

                  -
                • Is CSR Classics safe to play?
                • -

                  CSR Classics is safe to play as long as you download it from a trusted source, such as Google Play or the official website. However, the unlimited money mod is not an official version of the game and may contain viruses or malware that could harm your device. Therefore, you should download the mod from a reliable source and scan it with an antivirus before installing it. You should also back up your data and use the mod at your own risk.

                  -
                • Is CSR Classics online or offline?
                • -

                  CSR Classics can be played both online and offline. However, some features and modes, such as the online multiplayer mode, require an internet connection to work. You also need an internet connection to download updates and access some in-game offers and rewards.

                  -
                • How can I restore my progress in CSR Classics?
                • -

                  If you want to restore your progress in CSR Classics, you need to connect your game to a social media account, such as Facebook or Google Play Games. This way, you can sync your data across different devices and recover it if you lose or change your device. You can also use a cloud backup service, such as Google Drive or Dropbox, to save your data and restore it later.

                  -
                • How can I contact the developers of CSR Classics?
                • -

                  If you have any questions, feedback, or issues with CSR Classics, you can contact the developers of the game by using the following methods:

                  -
                    -
                  • Email: support@nmgames.com
                  • -
                  • Facebook: https://www.facebook.com/csrclassics
                  • -
                  • Twitter: https://twitter.com/CSRClassics
                  • -
                  • Website: https://www.naturalmotion.com/csr-classics/
                  • -
                  -
                • What are some similar games to CSR Classics?
                • -

                  If you like CSR Classics, you might also like these similar games:

                  -
                    -
                  • CSR Racing and CSR Racing 2: These are the predecessors and successors of CSR Classics, respectively. They have similar gameplay and features, but with modern cars and more options.
                  • -
                  • Asphalt 9: Legends: This is a fast-paced racing game that lets you drive some of the most prestigious cars in the world. You can customize your car, join a club, and compete in various events and modes.
                  • -
                  • Need for Speed No Limits: This is a racing game that lets you build your dream car and race against other players or AI opponents. You can upgrade your car, join a crew, and explore a huge open world.
                  • -

                  -
                  -
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Fallout Shelter for PC and Play Without Launcher Hassle.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Fallout Shelter for PC and Play Without Launcher Hassle.md
deleted file mode 100644
index 99458cb91997b29d5c863ba3b7cb6f33bb799a12..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Fallout Shelter for PC and Play Without Launcher Hassle.md
+++ /dev/null
@@ -1,132 +0,0 @@
-

                  How to Download and Play Fallout Shelter on PC Without Launcher

                  -

                  If you are a fan of the Fallout series, you might have heard of Fallout Shelter, a free-to-play simulation game that lets you build and manage your own post-apocalyptic vault. But did you know that you can also play it on your PC without using the Bethesda Launcher? In this article, we will show you how to download and play Fallout Shelter on PC without launcher, as well as some tips and tricks to make your vault thrive.

                  -

                  What is Fallout Shelter?

                  -

                  A brief introduction to the game and its features

                  -

                  Fallout Shelter is a spin-off game from the popular Fallout franchise, developed by Bethesda Game Studios and released in 2015. The game puts you in the role of an overseer, who is in charge of creating and maintaining a vault for the survivors of a nuclear war. You can customize your vault, assign dwellers to different rooms, send them out to explore the wasteland, and protect them from various threats such as raiders, radroaches, and fire. The game also features a SPECIAL system that determines the dwellers' skills and abilities, as well as a crafting system that allows you to create weapons, outfits, and other items.

                  -

                  fallout shelter pc download without launcher


                  Download 🆗 https://bltlly.com/2uOnGS



                  -

                  The benefits of playing on PC

                  -

                  While Fallout Shelter was originally designed for mobile devices, it is also available for PC users through Steam, Microsoft Store, or Bethesda.net. Playing on PC has some advantages over playing on mobile, such as:

                  -
                    -
                  • Better graphics and performance
                  • -
                  • Larger screen and resolution
                  • -
                  • More convenient controls with mouse and keyboard
                  • -
                  • Steam achievements and cloud saves
                  • -
                  • No ads or microtransactions
                  • -
                  -

                  How to Download Fallout Shelter on PC

                  -

                  The official ways to get the game from Steam, Microsoft Store, or Bethesda.net

                  -

                  The easiest way to download and play Fallout Shelter on PC is to get it from one of the official platforms. Here are the steps for each option:

                  -
                    -
                  1. Steam: Go to the Steam store page for Fallout Shelter and click on "Play Game". The game will be added to your library and downloaded automatically. You can then launch it from Steam.
                  2. -
                  3. Microsoft Store: Go to the Microsoft Store page for Fallout Shelter and click on "Get". The game will be installed on your device and you can launch it from the Start menu or the Xbox app.
                  4. -
                  5. Bethesda.net: Go to the Bethesda.net page for Fallout Shelter and click on "Download". You will need to create an account or log in with an existing one. Then, you will need to download and install the Bethesda Launcher, which will allow you to download and play the game.
                  6. -
                  -

                  The alternative way to launch the game without using Bethesda Launcher

                  -

                  The steps to create a shortcut from FalloutShelter.exe

                  -

                  If you don't want to use the Bethesda Launcher every time you want to play Fallout Shelter, there is a workaround that lets you launch the game directly from its executable file. Here's how:

                  -
                    -
                  1. Download and install Fallout Shelter from Bethesda.net as described above.
                  2. -
                  3. Go to the installation directory of Fallout Shelter, which should be C:\Program Files (x86)\Bethesda Softworks\Fallout Shelter by default.
                  4. -
                  5. Find the file named FalloutShelter.exe and right-click on it. Select "Create shortcut" from the menu.
                  6. -
                  7. Move the shortcut to your desktop or any other location you prefer.
                  8. -
                  9. Double-click on the shortcut to launch Fallout Shelter without using the Bethesda Launcher. (A scripted alternative is sketched after this list.)
                  10. -
                  -
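
                  If you prefer a script to a desktop shortcut, the same idea fits in a few lines. This is a minimal sketch, not an official Bethesda tool, and it assumes the default install path from step 3.

```python
import subprocess
from pathlib import Path

# Default install path from step 3; adjust it if you installed somewhere else.
game_exe = Path(r"C:\Program Files (x86)\Bethesda Softworks\Fallout Shelter\FalloutShelter.exe")

if not game_exe.exists():
    raise FileNotFoundError(f"FalloutShelter.exe not found at {game_exe}")

# Start the game directly, bypassing the Bethesda Launcher. Setting cwd to the
# game folder lets the executable find its data files.
subprocess.Popen([str(game_exe)], cwd=game_exe.parent)
```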

                  The drawbacks of this method (offline mode, language settings, etc.)

                  -

                  While this method is convenient, it also has some limitations and disadvantages that you should be aware of. For example:

                  -
                    -
                  • You will not be able to access the online features of the game, such as cloud saves, achievements, or updates. You will only be able to play in offline mode.
                  • -
                  • You will not be able to change the language settings of the game. The game will use the default language of your system.
                  • -
                  • You will not be able to uninstall the game from the Bethesda Launcher. You will have to manually delete the game files from your computer.
                  • -
                  -

                  Therefore, if you want to enjoy the full functionality and support of Fallout Shelter on PC, we recommend that you use the Bethesda Launcher or one of the other official platforms.

                  -

                  How to Play Fallout Shelter on PC

                  -

                  The basic gameplay mechanics and tips for managing your vault

                  -

                  Once you have downloaded and launched Fallout Shelter on PC, you can start building your vault and taking care of your dwellers. The game will guide you through a tutorial that explains the basic gameplay mechanics and objectives. Here are some tips to help you get started:

                  -

                  How to play fallout shelter on pc without bethesda launcher
                  -Fallout shelter pc offline mode without launcher
                  -Fallout shelter pc installation directory and shortcut
                  -Fallout shelter pc free download no launcher required
                  -Fallout shelter pc language settings without launcher
                  -Fallout shelter pc steam version vs bethesda launcher
                  -Fallout shelter pc anti-aliasing fix without launcher
                  -Fallout shelter pc game save location without launcher
                  -Fallout shelter pc cheats and mods without launcher
                  -Fallout shelter pc windows 10 compatibility without launcher
                  -Fallout shelter pc system requirements and performance without launcher
                  -Fallout shelter pc tips and tricks without launcher
                  -Fallout shelter pc vault management guide without launcher
                  -Fallout shelter pc best dwellers and outfits without launcher
                  -Fallout shelter pc how to get legendary weapons and pets without launcher
                  -Fallout shelter pc how to deal with disasters and raids without launcher
                  -Fallout shelter pc how to expand and upgrade rooms without launcher
                  -Fallout shelter pc how to increase happiness and productivity without launcher
                  -Fallout shelter pc how to unlock new quests and locations without launcher
                  -Fallout shelter pc how to earn more caps and resources without launcher
                  -Fallout shelter pc how to transfer data from mobile to pc without launcher
                  -Fallout shelter pc how to backup and restore game data without launcher
                  -Fallout shelter pc how to sync game progress with cloud without launcher
                  -Fallout shelter pc how to play with friends and other players without launcher
                  -Fallout shelter pc how to access the store and buy items without launcher
                  -Fallout shelter pc review and rating without launcher
                  -Fallout shelter pc latest update and patch notes without launcher
                  -Fallout shelter pc download size and installation time without launcher
                  -Fallout shelter pc error and crash fix without launcher
                  -Fallout shelter pc keyboard and mouse controls without launcher
                  -Fallout shelter pc graphics and sound settings without launcher
                  -Fallout shelter pc achievements and trophies without launcher
                  -Fallout shelter pc comparison with other fallout games without launcher
                  -Fallout shelter pc fun facts and easter eggs without launcher
                  -Fallout shelter pc fan art and wallpapers without launcher
                  -Fallout shelter pc community and forums without launcher
                  -Fallout shelter pc support and feedback without launcher
                  -Fallout shelter pc alternatives and similar games without launcher
                  -Fallout shelter pc history and development without launcher
                  -Fallout shelter pc future plans and updates without launcher

                  -
                    -
                  • Build different types of rooms to provide power, water, food, and other resources for your vault. Each room has a specific function and requires a certain amount of dwellers with matching SPECIAL skills to operate efficiently.
                  • -
                  • Expand your vault by digging deeper into the ground and adding more rooms. You can also upgrade your rooms to increase their capacity and output. However, be careful not to overbuild or overpopulate your vault, as this will increase the demand for resources and the risk of incidents.
                  • -
                  • Attract new dwellers by building a radio station or waiting for them to arrive at your vault door. You can also make your existing dwellers happy and productive by assigning them to their ideal jobs, giving them outfits and weapons, and letting them socialize and mate with each other.
                  • -
                  • Send your dwellers out to explore the wasteland and collect caps, items, and experience. You can equip them with weapons, outfits, and stimpacks to increase their chances of survival. You can also recall them back to your vault at any time.
                  • -
                  • Protect your vault from external and internal threats such as raiders, deathclaws, radroaches, fires, and more. You can arm your dwellers with weapons and train them in combat rooms to improve their fighting skills. You can also build defense rooms such as turrets and guard posts to deter invaders.
                  • -
                  -

                  The advanced strategies and tricks for optimizing your resources and dwellers

                  -

                  As you progress in the game, you will face more challenges and opportunities to improve your vault and dwellers. Here are some advanced strategies and tricks that you can use to optimize your gameplay:

                  -
                    -
                  • Use the SPECIAL system to assign your dwellers to the best rooms for their skills. For example, strength is good for power rooms, perception is good for water rooms, endurance is good for exploring the wasteland, etc. You can also train your dwellers in training rooms to increase their SPECIAL stats.
                  • -
                  • Craft weapons, outfits, and other items using junk that you find in the wasteland or in lunchboxes. You can also scrap unwanted items for more junk. Crafting allows you to create rare and legendary items that can boost your dwellers' performance and happiness.
                  • -
                  • Complete quests and objectives to earn caps, lunchboxes, Nuka-Cola Quantum, and other rewards. Quests are missions that you can send a team of dwellers to complete in various locations in the wasteland. Objectives are tasks that you can complete within your vault or while exploring.
                  • -
                  • Use Nuka-Cola Quantum to speed up various actions in the game, such as returning explorers, crafting items, training dwellers, or completing quests. You can earn Nuka-Cola Quantum by completing objectives, opening lunchboxes, or finding them in the wasteland.
                  • -
                  • Manage your resource consumption and production by using the resource bars at the top of the screen. The green bars indicate how much of each resource you have stored in your vault, while the red bars indicate how much of each resource you are consuming per minute. You want to keep your production higher than your consumption to avoid running out of resources. (A small worked example follows this list.)
                  • -
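
                  To make the production-versus-consumption point concrete, here is a toy sketch of that arithmetic. The numbers are made up for illustration; the game does not expose these values through any API.

```python
def minutes_until_empty(stored: float, production: float, consumption: float) -> float:
    """How long current stores last, in minutes; infinite if production keeps up."""
    net = production - consumption  # units per minute
    return float("inf") if net >= 0 else stored / -net

# Example: 150 units of power stored, producing 40/min but consuming 55/min.
print(minutes_until_empty(150, 40, 55))  # 10.0, so about ten minutes to fix the deficit
```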
                  -

                  Conclusion

                  A summary of the main points and a call to action for the readers

                  -

                  In conclusion, Fallout Shelter is a fun and addictive game that lets you create and manage your own vault in the Fallout universe. You can download and play it on your PC without using the Bethesda Launcher by following the steps we have outlined in this article. However, you should also be aware of the drawbacks and limitations of this method, and consider using one of the official platforms for a better gaming experience. We hope you enjoyed this article and learned something new. If you have any questions or feedback, feel free to leave a comment below. And if you are ready to start your own vault adventure, download Fallout Shelter on PC today and see how long you can survive in the wasteland!

                  -

                  FAQs

                  -

                  Here are some frequently asked questions and answers about Fallout Shelter on PC:

                  -
                    -
                  1. Is Fallout Shelter free to play on PC?
                    -Yes, Fallout Shelter is free to play on PC, regardless of which platform you choose to download it from. You don't need to pay anything to download, install, or play the game.
                  2. -
                  3. Can I play Fallout Shelter on PC with my friends?
                    -Unfortunately, Fallout Shelter does not have a multiplayer or co-op mode. You can only play it solo. However, you can still share your vault progress and achievements with your friends through social media or screenshots.
                  4. -
                  5. Can I transfer my vault data from mobile to PC or vice versa?
                    -Yes, you can transfer your vault data from mobile to PC or vice versa, but only if you use the same platform (Steam, Microsoft Store, or Bethesda.net) on both devices. You will need to use the cloud save feature to sync your vault data across devices.
                  6. -
                  7. Can I mod Fallout Shelter on PC?
                    -Yes, you can mod Fallout Shelter on PC, but only if you use the Bethesda.net version of the game. You will need to download and install the Fallout Shelter Modding Tool from this website, which will allow you to create and apply mods to your game.
                  8. -
                  9. What are the system requirements for Fallout Shelter on PC?
                    -The minimum system requirements for Fallout Shelter on PC are:
                      -
                    • OS: Windows 7 64-bit
                    • -
                    • Processor: Intel Core 2 Quad CPU Q9550 @2.83GHz
                    • -
                    • Memory: 2 GB RAM
                    • -
                    • Graphics: NVIDIA GeForce GTS 250 1GHz, Radeon HD 6970 1GHz
                    • -
                    • Storage: 2 GB available space
                    • -
                    - The recommended system requirements for Fallout Shelter on PC are:
                      -
                    • OS: Windows 7/8/10 64-bit
                    • -
                    • Processor: Intel Core i5-2300 2.8 GHz/AMD Phenom II X4 945 3.0 GHz or equivalent
                    • -
                    • Memory: 4 GB RAM
                    • -
                    • Graphics: NVIDIA GTX 550 Ti 2GB/AMD Radeon HD 7870 2GB or equivalent
                    • -
                    • Storage: 2 GB available space
                    • -
                  10. -

                  -
                  -
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Garena Speed Drifters MOD APK v1.32.0.10340 (Unlimited Nitro Speed Car) for Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Garena Speed Drifters MOD APK v1.32.0.10340 (Unlimited Nitro Speed Car) for Android.md
deleted file mode 100644
index 85ab19f56a87084b109daf9b02a4aa9087949fa2..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Garena Speed Drifters MOD APK v1.32.0.10340 (Unlimited Nitro Speed Car) for Android.md
+++ /dev/null
@@ -1,103 +0,0 @@
-

                  Garena Speed Drifters Mod APK: A Racing Game That Will Keep You On The Edge Of Your Seat

                  -

                  If you are a fan of racing games, you might have heard of Garena Speed Drifters. It is an exciting racing game that lets you race against a wide variety of opponents and use your skills to outwit them. The game provides a drift button that you can use to handle tricky turns with ease and store up energy for speed boosts.

                  -

                  garena speed drifters mod apk


                  DOWNLOAD »»» https://bltlly.com/2uOhPs



                  -

                  But what if you want to enjoy more features and benefits in this game? What if you want to have unlimited access to all the cars, tracks, modes, and items in the game? What if you want to win every race without any hassle? Well, there is a way to do that. And that is by downloading Garena Speed Drifters Mod APK.

                  -

                  What is Garena Speed Drifters Mod APK?

                  -

                  Garena Speed Drifters Mod APK is a modified version of the original game that offers some extra features and advantages for the players. Some of these features are:

                  -
                    -
                  • Instant win: You can win any race instantly by using this feature. No matter how fast or skilled your opponents are, you can always cross the finish line first.
                  • -
                  • Unlimited money: You can get unlimited money in this modded version of the game. You can use this money to buy any car, upgrade any part, or unlock any item in the game.
                  • -
                  • Unlocked everything: You can access everything in this modded version of the game. All the cars, tracks, modes, and items are unlocked for you. You can choose any car you want, race on any track you like, play any mode you prefer, and use any item you need.
                  • -
                  • No ads: You can enjoy this modded version of the game without any annoying ads. You can play without any interruption or distraction.
                  • -
                  -

                  Why should you download Garena Speed Drifters Mod APK?

                  -

                  There are many reasons why you should download Garena Speed Drifters Mod APK. Some of these reasons are:

                  -
                    -
                  • You can have more fun and excitement in this game. You can race with any car you want, on any track you like, and with any mode you prefer. You can also use any item you need to boost your speed, drift, or attack your opponents.
                  • -
                  • You can save your time and money in this game. You don't have to spend hours or dollars to earn money or unlock items in the game. You can get everything for free and instantly in this modded version of the game.
                  • -
                  • You can improve your skills and performance in this game. You can practice your racing and drifting skills on different tracks and modes. You can also learn from your opponents and improve your strategies and tactics.
                  • -
                  • You can challenge yourself and others in this game. You can compete with other players from around the world and see who is the best racer and drifter. You can also join a club or create your own club and team up with your friends or other players.
                  • -
                  -

                  How to download and install Garena Speed Drifters Mod APK?

                  -

                  Downloading and installing Garena Speed Drifters Mod APK is very easy and simple. Just follow these steps:

                  -
                    -
                  1. Click on the download button below to download the modded version of the game.
                  2. -
                  3. After the download is complete, go to your device settings and enable the installation of apps from unknown sources.
                  4. -
                  5. Go to your file manager and locate the downloaded file. Tap on it to start the installation process. (Alternatively, you can sideload the file from a computer; see the adb sketch after these steps.)
                  6. -
                  7. Wait for a few seconds until the installation is done. Then, open the game and enjoy.
                  8. -
                  -

                  Note: You may need to uninstall the original version of the game before installing the modded version.
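
                  As an alternative to installing from the phone's file manager (step 5 above), you can sideload the file from a computer with adb. This is a hedged sketch: it assumes adb is installed, USB debugging is enabled on the phone, and the APK file name matches whatever you actually downloaded.

```python
import subprocess

APK_PATH = "speed_drifters_mod.apk"  # hypothetical file name; use your download

# "adb install -r" installs the package, replacing any existing version while
# keeping its data, which is handy when updating the mod.
result = subprocess.run(["adb", "install", "-r", APK_PATH], capture_output=True, text=True)
print(result.stdout or result.stderr)
```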

                  -

                  garena speed drifters mod apk unlimited money
                  -garena speed drifters mod apk latest version
                  -garena speed drifters mod apk download for android
                  -garena speed drifters mod apk free shopping
                  -garena speed drifters mod apk no root
                  -garena speed drifters mod apk offline
                  -garena speed drifters mod apk hack
                  -garena speed drifters mod apk 2023
                  -garena speed drifters mod apk revdl
                  -garena speed drifters mod apk happymod
                  -garena speed drifters mod apk rexdl
                  -garena speed drifters mod apk pure
                  -garena speed drifters mod apk obb
                  -garena speed drifters mod apk android 1
                  -garena speed drifters mod apk data
                  -garena speed drifters mod apk unlimited diamonds
                  -garena speed drifters mod apk vip
                  -garena speed drifters mod apk god mode
                  -garena speed drifters mod apk mega
                  -garena speed drifters mod apk all cars unlocked
                  -garena speed drifters mod apk anti ban
                  -garena speed drifters mod apk an1
                  -garena speed drifters mod apk all skins unlocked
                  -garena speed drifters mod apk blackmod
                  -garena speed drifters mod apk by androidoyun club
                  -garena speed drifters mod apk cheat
                  -garena speed drifters mod apk coins and gems
                  -garena speed drifters mod apk club
                  -garena speed drifters mod apk cracked
                  -garena speed drifters mod apk chinese version
                  -garena speed drifters mod apk english version
                  -garena speed drifters mod apk everything unlocked
                  -garena speed drifters mod apk easy download
                  -garena speed drifters mod apk full unlocked
                  -garena speed drifters mod apk file download
                  -garena speed drifters mod apk for pc
                  -garena speed drifters mod apk for ios
                  -garena speed drifters mod apk game download
                  -garena speed drifters mod apk generator online
                  -garena speed drifters mod apk gameplay
                  -garena speed drifters mod apk high damage
                  -garena speed drifters mod apk highly compressed
                  -garena speed drifters mod apk hack download 2023
                  -garena speed drifters mod apk indonesia 2023 terbaru gratis download no iklan tanpa root work 100%
                  -garena speed drifters mod apk japan version 2023 update new cars and tracks unlimited nitro and boost free shopping no ads no verification no survey no password mediafire link direct download link online multiplayer mode support all devices android ios windows mac linux chrome os etc.

                  -

                  What are some tips and tricks to master Garena Speed Drifters?

                  -

                  Garena Speed Drifters is a game that requires skill, strategy, and luck. Here are some tips and tricks that can help you master this game:

                  -
                    -
                  • Choose the right car for your style. There are different types of cars in this game, such as speed, balance, drift, and acceleration. Each car has its own strengths and weaknesses, so choose the one that suits your style and preference.
                  • -
                  • Use the drift button wisely. The drift button is a key feature in this game that allows you to handle sharp turns and store energy for speed boost. However, drifting too much or too little can affect your performance, so use it wisely and timely.
                  • -
                  • Use the items effectively. There are different items in this game that can help you or hinder your opponents, such as rockets, bombs, shields, magnets, etc. Use them effectively to gain an advantage or overcome a disadvantage in the race.
                  • -
                  • Customize your car and character. You can customize your car and character in this game by changing their colors, skins, stickers, accessories, etc. This can make your car and character more unique and attractive.
                  • -
                  • Join a club or create your own club. You can join a club or create your own club in this game to interact with other players, share tips and tricks, participate in events, earn rewards, etc.
                  • -
                  -

                  Conclusion

                  -

                  Garena Speed Drifters Mod APK is a racing game that will keep you on the edge of your seat. It offers many features and benefits that make it more fun and exciting than the original version of the game. You can download it easily and install it on your device without any hassle. You can also use some tips and tricks to improve your skills and performance in the game. So, what are you waiting for? Download Garena Speed Drifters Mod APK now and enjoy the thrill of racing and drifting.

                  -

                  FAQs

                  -

                  Here are some frequently asked questions and their answers about Garena Speed Drifters Mod APK:

                  -
                    -
                  1. Is Garena Speed Drifters Mod APK safe to use?
                  2. -

                    Yes, Garena Speed Drifters Mod APK is safe to use. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from a trusted source like ours.

                    -
                  3. Is Garena Speed Drifters Mod APK compatible with my device?
                  4. -

                    Garena Speed Drifters Mod APK is compatible with most Android devices that have Android 4.1 or higher versions. However, some devices may not support some features or functions of the modded version of the game.

                    -
                  5. Do I need to root my device to use Garena Speed Drifters Mod APK?
                  6. -

                    No, you don't need to root your device to use Garena Speed Drifters Mod APK. You can use it without any root access or permission.

                    -
                  7. Can I play online with other players using Garena Speed Drifters Mod APK?
                  8. -

                    Yes, you can play online with other players using Garena Speed Drifters Mod APK. However, you may face some issues or errors while playing online, such as lag, disconnect, or ban. Therefore, we recommend you to use a VPN or play offline to avoid these problems.

                    -
                  9. Can I update Garena Speed Drifters Mod APK?
                  10. -

                    Yes, you can update Garena Speed Drifters Mod APK whenever there is a new version available. However, you may lose some of the modded features or data after updating. Therefore, we suggest you to backup your data before updating or wait for the updated modded version from our site.

                    -
                  11. How can I contact the developer of Garena Speed Drifters Mod APK?
                  12. -

                    You can contact the developer of Garena Speed Drifters Mod APK by visiting their official website or social media pages. You can also leave a comment or feedback on our site and we will try to reach them for you.

                    -

                  -
                  -
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Able 50 Yard Zero Target ((FREE)).md b/spaces/tioseFevbu/cartoon-converter/scripts/Able 50 Yard Zero Target ((FREE)).md
deleted file mode 100644
index 8a99efa05ba1ac85a83ea4ac900c27f2bd80f67f..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Able 50 Yard Zero Target ((FREE)).md
+++ /dev/null
@@ -1,32 +0,0 @@
-

                  How to Zero Your Red Dot Optic with the Able 50 Yard Zero Target

                  -

                  If you own an AR-15 rifle with a red dot optic, you might be wondering how to zero it for maximum accuracy and effectiveness. One of the most popular methods is to use the Able 50 Yard Zero Target, which allows you to zero your optic at 50 yards and have a consistent point of aim and point of impact from close to medium ranges.

                  -

                  The Able 50 Yard Zero Target is designed to take advantage of the human eye's natural tendency to center an object within a circle. It also has different colored dots and rings to correspond with different sizes of red dot optics, as well as grid lines and adjustment references to help you dial in your zero.

                  -

                  Able 50 Yard Zero Target


                  Download Zip ––– https://urlcod.com/2uHvVR



                  -

                  In this article, we will explain how to use the Able 50 Yard Zero Target and why it is a good choice for your AR-15 rifle.

                  -

                  What is the Able 50 Yard Zero Target?

                  -

                  The Able 50 Yard Zero Target is a printable target that you can download from ARMA DYNAMICS, a company that specializes in firearms training and consulting. The target consists of:

                  -
                    -
                  • A 1 MOA center 'zero' dot
                  • -
                  • A 2 MOA green inner dot to correspond with 2 MOA optics
                  • -
                  • A 4 MOA red outer dot to correspond with 4 MOA optics
                  • -
                  • A larger black ring to assist in centering the optic's dot over the center of the target
                  • -
                  • Bold gray cross-hairs to assist in centering the reticle on the target
                  • -
                  • Grid lines with click adjustment references and adjustment dial references (1/2 MOA adjustments common on red dot sights)
                  • -
                  -

                  The target is designed to be printed on standard 8.5" x 11" paper and placed at 50 yards. The idea is that when you aim at the colored portion of the target that matches your optic's size, your bullet will hit the center 'zero' dot. This will give you a 50/200 yard zero, meaning that your bullet will hit where you aim at both 50 yards and 200 yards, and only deviate by a few inches at other distances.
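
                  The click math behind the target's grid references is easy to work out yourself. One MOA subtends roughly 1.047 inches at 100 yards and scales linearly with distance, so at 50 yards each 1/2 MOA click moves the point of impact about a quarter inch. Here is a small sketch of that arithmetic, assuming true-MOA turrets as the target's references do:

```python
def inches_per_click(moa_per_click: float, distance_yards: float) -> float:
    """Point-of-impact shift, in inches, for one turret click at a given distance."""
    return moa_per_click * 1.047 * (distance_yards / 100.0)

def clicks_to_correct(error_inches: float, moa_per_click: float, distance_yards: float) -> int:
    """Whole clicks needed to walk a group onto the center 'zero' dot."""
    return round(error_inches / inches_per_click(moa_per_click, distance_yards))

# Example: a group printing 2 inches low at 50 yards with 1/2 MOA clicks.
print(inches_per_click(0.5, 50))       # ~0.26 inches per click
print(clicks_to_correct(2.0, 0.5, 50)) # 8 clicks of "up"
```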

                  -

                  Why use the Able 50 Yard Zero Target?

                  -

                  The Able 50 Yard Zero Target has several advantages over other zeroing methods. First of all, it is simple and easy to use. You don't need to calculate any ballistic data or memorize any holdovers. You just aim at the colored dot and adjust your optic until you hit the center dot.

                  -

                  Second, it is versatile and effective. The 50/200 yard zero works well for most AR-15 rifles and ammunition combinations. It allows you to use a simple center-mass hold for targets from point blank to 250 yards, depending on your rifle and ammo. You don't need to worry about bullet drop or windage much within this range.

                  -

                  Third, it is convenient and accessible. You only need a 50 yard range to zero your optic, which is more common than longer ranges. You also only need one target, which saves you time and paper. You can print as many targets as you need from your home or office.

                  -

                  How to use the Able 50 Yard Zero Target?

                  -

                  To use the Able 50 Yard Zero Target, follow these steps:

                  -
                    -
1. Print the target on standard 8.5" x 11" paper. Make sure your printer scaling is set to either 'off' or '100%' for proper dimensions.
2. Place the target at 50 yards. Use a tape measure or rangefinder to ensure accuracy.
3. Mount your red dot optic on your AR-15 rifle and make sure it is secure and aligned with your bore.
4. Load your rifle with the ammunition you plan to use and make sure it is safe and legal to shoot at your range.
5. Aim at the colored dot that matches your optic's size. For example, if you have a 2 MOA red dot, aim at the green inner dot. Make sure your

                    -

                    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Fundamentos De La Administracion Munch Galindo Pdf Download 2021.md b/spaces/tioseFevbu/cartoon-converter/scripts/Fundamentos De La Administracion Munch Galindo Pdf Download 2021.md deleted file mode 100644 index e0e61061938a3f5b4d95f1ea330d1cb7ae880513..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Fundamentos De La Administracion Munch Galindo Pdf Download 2021.md +++ /dev/null @@ -1,26 +0,0 @@ - -

How to Download the Book Fundamentos de Administración by Lourdes Münch Galindo and José G. García Martínez

                    -

The book Fundamentos de Administración by Lourdes Münch Galindo and José G. García Martínez is a work that lays out the principles, techniques, and basic foundations of management. Its content compiles the theories of the best-known writers in the field, enriched with the authors' own professional and academic experience[^2^].

                    -

This book is an essential text for management students and professionals, as it offers a comprehensive and up-to-date view of the discipline. It also includes examples, case studies, exercises, and activities that make the concepts easier to learn and apply.

                    -

                    fundamentos de la administracion munch galindo pdf download


Download https://urlcod.com/2uHwxx



                    -

If you want to download the book Fundamentos de Administración by Lourdes Münch Galindo and José G. García Martínez in PDF format, there are several ways to do it:

                    - -

We hope this information has been useful to you and that you enjoy the book Fundamentos de Administración by Lourdes Münch Galindo and José G. García Martínez.

                    - -

The book Fundamentos de Administración by Lourdes Münch Galindo and José G. García Martínez is divided into five units, which cover the following topics:

                    -
                      -
1. Management: concepts, background, importance, and objectives.
2. The management process: planning, organization, direction, and control.
3. The functional areas of management: production, marketing, finance, and human resources.
4. Management in the global context: environment, culture, and social responsibility.
5. Challenges and trends in management: innovation, quality, ethics, and leadership.
                    -

Each unit includes an introduction, a concept map, a theoretical discussion, a summary, a self-assessment, and a bibliography. Synoptic charts, flow diagrams, comparative tables, and graphs are also included to make the content easier to understand, along with real cases of national and international companies that illustrate the practical application of management.

                    -

The book Fundamentos de Administración by Lourdes Münch Galindo and José G. García Martínez combines academic rigor with clear exposition. Its language is simple and accessible to all kinds of readers. Its approach is modern and up to date, in line with the demands of the business world. Its purpose is to give management students and professionals the tools they need to perform successfully in the workplace.

                    -

                    \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Galactic Codex Essentials Edition 2183 Pdf 14 Free.md b/spaces/tioseFevbu/cartoon-converter/scripts/Galactic Codex Essentials Edition 2183 Pdf 14 Free.md deleted file mode 100644 index 6dc98533da8c15299aaeef50d4c5d9d42c056453..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Galactic Codex Essentials Edition 2183 Pdf 14 Free.md +++ /dev/null @@ -1,20 +0,0 @@ -

                    Galactic Codex Essentials Edition 2183 Pdf 14: A Guide to the Mass Effect Universe

                    -

                    If you are a fan of the Mass Effect video game series, you might be interested in reading the Galactic Codex Essentials Edition 2183 Pdf 14, a booklet that contains some of the information in the in-game Codex with some additional data. The booklet was included in the Mass Effect Limited Collector's Edition, which was released in 2007. The booklet provides a comprehensive overview of the Mass Effect universe, including the history, races, factions, technology, biotics and more.

                    -

                    Galactic Codex Essentials Edition 2183 Pdf 14


Download https://urlcod.com/2uHvji



                    -

                    In this article, we will give you a brief summary of what you can find in the Galactic Codex Essentials Edition 2183 Pdf 14, and why it is a valuable resource for Mass Effect fans. We will also tell you where you can download or buy the booklet online.

                    -

                    What is the Galactic Codex?

                    -

                    The Galactic Codex is a collection of information that is accessible through the user interface of the Mass Effect games. It contains entries on various topics related to the game world, such as planets, species, cultures, events, organizations and more. The Codex entries are narrated by an anonymous voice that provides objective and factual information. The Codex also has a secondary section that contains personal notes and opinions from Commander Shepard's squadmates and other characters.

                    -

                    The Galactic Codex is updated as the player progresses through the game and discovers new information. It helps the player to understand the background and context of the game's story and setting. It also adds depth and richness to the game's lore and worldbuilding.

                    -

                    What is the Galactic Codex Essentials Edition 2183 Pdf 14?

                    -

                    The Galactic Codex Essentials Edition 2183 Pdf 14 is a printed version of some of the information in the in-game Codex. It was written by Microsoft Game Studios based on BioWare's game documentation[^1^] [^2^]. It contains 36 pages of text and illustrations that cover some of the most important aspects of the Mass Effect universe.

                    -

                    The booklet is divided into four sections: The Citadel Races, The History of Citadel Space, Corporations and Biotics. Each section contains several entries that provide detailed information on various topics. For example, in the Citadel Races section, you can learn about the Asari, Turians, Salarians, Krogan, Quarians and Volus. In the History of Citadel Space section, you can learn about the First Contact War, the Rachni Wars, the Krogan Rebellions and more. In the Corporations section, you can learn about some of the major companies that operate in the galaxy, such as ExoGeni Corporation, Binary Helix and Sirta Foundation. In the Biotics section, you can learn about what biotics are, how they work and how they are regulated.

                    -

                    -

                    The Galactic Codex Essentials Edition 2183 Pdf 14 provides a lot of information that is not available or not fully explained in the in-game Codex. It also gives a more complete picture of the Mass Effect universe before the events of the first game. It is a useful guide for anyone who wants to learn more about the game's lore and setting.

                    -

                    Where can I get the Galactic Codex Essentials Edition 2183 Pdf 14?

                    -

                    The Galactic Codex Essentials Edition 2183 Pdf 14 was originally included in the Mass Effect Limited Collector's Edition, which was a special edition of the first game that came with a metal case, an art book and a bonus DVD. The Limited Collector's Edition was only available for Xbox 360 and PC platforms. It is no longer in production and can be hard to find online.

                    -

                    However, you can still download or buy the booklet online from various sources. One option is to download it as a PDF file from Mass Effect Wiki, which has uploaded a scanned copy of the booklet for free[^1^]. Another option is to buy it as a paperback book from https://urlcod.com/2uHxe3



                    -

However, Lectra Modaris is not cheap software. It requires a license and a subscription plan to use it. If you want to save money and use Lectra Modaris without paying, you might be tempted to use a cracked version of the software. But is it worth it? And how can you install it on your Windows computer?

                    -

                    In this article, we will answer these questions and show you how to install Lectra Modaris full cracked version on Windows using an automatic installation method. We will also discuss the pros and cons of using a cracked version, as well as the risks and precautions involved.

                    -

                    What is Lectra Modaris?

                    -

Lectra Modaris is patternmaking software developed by Lectra, a leading company in integrated technology solutions for the fashion industry. Lectra Modaris is designed to help fashion professionals create patterns and production-ready prototypes with efficiency and quality.

                    -

                    Features and benefits of Lectra Modaris

                    -

                    Some of the features and benefits of Lectra Modaris are:

                    -

                    -
                      -
• It supports various file formats, such as DXF, AAMA, ASTM, MDL, etc.
• It allows you to create base patterns from scratch or from existing libraries.
• It enables you to modify patterns with parametric tools and automatic adjustments.
• It allows you to grade patterns according to different sizes and measurements.
• It enables you to simulate 3D prototypes and check the fit and drape of your garments on different mannequins and fabrics.
• It allows you to export patterns and 3D models to other software or platforms, such as Adobe Illustrator, Gerber AccuMark, etc.
• It helps you optimize fabric consumption and reduce costs with quick estimate tools.
                    -

                    Requirements and compatibility of Lectra Modaris

                    -

                    To use Lectra Modaris, you need to have the following requirements:

                    -
                      -
• A Windows computer with at least 4 GB of RAM, 10 GB of free disk space, and an Intel Core i5 processor or equivalent.
• A Windows operating system, such as Windows 7, Windows 8.1, or Windows 10.
• A license key and a subscription plan from Lectra.
• A USB dongle or an internet connection for activation.
                    -

                    Lectra Modaris is compatible with other software and platforms, such as:

                    -
                      -
• Lectra Diamino, a marker-making software that optimizes fabric cutting.
• Lectra Kaledo, a design software that helps you create prints, knits, and weaves.
• Lectra Fashion PLM, a product lifecycle management software that helps you manage your collections from design to production.
• Gerber AccuMark, a pattern design software that can exchange files with Lectra Modaris.
                    -

                    Why use a cracked version of Lectra Modaris?

                    -

                    A cracked version of Lectra Modaris is a modified version of the software that bypasses the license and activation process. It allows you to use Lectra Modaris without paying for it or subscribing to it. However, using a cracked version of Lectra Modaris has its advantages and disadvantages, as well as its risks and precautions.

                    -

                    Advantages and disadvantages of using a cracked version

                    -

                    Some of the advantages of using a cracked version of Lectra Modaris are:

                    -
                      -
• You can save money and avoid paying for the license and subscription fees.
• You can use Lectra Modaris without needing a USB dongle or an internet connection for activation.
• You can access all the features and functions of Lectra Modaris without any limitations or restrictions.
                    -

                    Some of the disadvantages of using a cracked version of Lectra Modaris are:

                    -
                      -
• You may not be able to update or upgrade Lectra Modaris to the latest version or patch.
• You may not be able to access the technical support or customer service from Lectra.
• You may not be able to integrate Lectra Modaris with other software or platforms that require a valid license.
• You may violate the intellectual property rights and terms of use of Lectra.
                    -

                    Risks and precautions of using a cracked version

                    -

                    Some of the risks of using a cracked version of Lectra Modaris are:

                    -
                      -
• You may download a fake or corrupted file that contains viruses, malware, or spyware that can harm your computer or steal your data.
• You may face legal consequences or penalties from Lectra or other authorities for using pirated software.
• You may compromise the quality and security of your work and data by using unauthorized and unverified software.
                    -

                    Some of the precautions of using a cracked version of Lectra Modaris are:

                    -
                      -
• You should only download Lectra Modaris full cracked version from trusted and reputable sources and links.
• You should verify the authenticity and safety of the downloaded file by checking its size, extension, checksum, etc.
• You should scan the downloaded file with an antivirus or anti-malware software before opening or installing it.
• You should backup your work and data regularly in case of any problems or errors with the software.
                    -

                    How to download Lectra Modaris full cracked version?

                    -

                    If you decide to use a cracked version of Lectra Modaris, you need to download it from a source that provides it. There are many websites and forums that offer Lectra Modaris full cracked version for free or for a small fee. However, not all of them are reliable or safe. You need to be careful and cautious when choosing where to download Lectra Modaris full cracked version.

                    -

                    Sources and links to download Lectra Modaris full cracked version

                    -

                    Some of the sources and links that claim to provide Lectra Modaris full cracked version are:

| Source | Link | Description |
| --- | --- | --- |
| LectraModarisCrack.com | [1](https://lectramodariscrack.com/) | A website that claims to offer Lectra Modaris V8R2 full cracked version with automatic installation. It requires you to fill out a survey or complete an offer before downloading the file. |
| GarmentSoftware.net | [2](https://garmentsoftware.net/lectra-modaris-v8r1-full-crack-automatic-install/) | A website that claims to offer Lectra Modaris V8R1 full cracked version with automatic installation. It requires you to pay $50 via PayPal before downloading the file. |
| CadPattern2017.blogspot.com | [3](https://cadpattern2017.blogspot.com/2019/03/lectra-modaris-v8r2-expert-full.html) | A blog that claims to offer Lectra Modaris V8R2 Expert full cracked version with automatic installation. It provides a direct download link without any payment or survey. |
| Civilax.com | [4](https://www.civilax.com/lectra-modaris-v7r2-sp7-3d-prototyping/) | A website that claims to offer Lectra Modaris V7R2 SP7 3D Prototyping full cracked version with manual installation. It provides multiple download links from different file hosting services. |
| Gsmdevelopers.com | | A forum that claims to offer Lectra Modaris V8R1 full cracked version with manual installation. It requires you to register and reply to the thread before downloading the file. |
                    -

                    These are just some examples of the sources and links that you can find online. However, we do not endorse or recommend any of them. We are not responsible for the validity, safety, or legality of the files or websites. You should use them at your own risk and discretion.

                    -

                    How to verify the authenticity and safety of the downloaded file

                    -

                    Before you install Lectra Modaris full cracked version on your Windows computer, you should verify the authenticity and safety of the downloaded file. This will help you avoid installing a fake or corrupted file that can damage your computer or compromise your data. Here are some steps that you can follow to verify the downloaded file:

                    -
                      -
1. Check the size of the file. The size of Lectra Modaris full cracked version should be around 1 GB to 2 GB, depending on the version and the installation method. If the file is too small or too large, it might be a fake or incomplete file.
2. Check the extension of the file. The extension of Lectra Modaris full cracked version should be either .exe, .rar, .zip, or .iso, depending on the format and the compression of the file. If the file has a different or unknown extension, it might be a malicious or incompatible file.
3. Check the checksum of the file. The checksum is a unique code that identifies the content and integrity of a file. You can use a tool such as MD5 & SHA Checksum Utility to calculate and compare the checksum of your downloaded file with the checksum provided by the source or link (see the sketch after this list). If the checksums match, it means that the file is authentic and unmodified. If they do not match, it means that the file is altered or corrupted.
4. Scan the file with an antivirus or anti-malware software. You can use software such as Avast, Malwarebytes, or Windows Defender to scan your downloaded file for any viruses, malware, or spyware that can harm your computer or steal your data. If the scan detects any threats, you should delete or quarantine the file immediately.
                    -
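If you prefer to script the checksum comparison, the same check can be done with Python's standard hashlib module. This is a generic sketch; the file name and the expected value below are placeholders, not real checksums from any download source:

```python
import hashlib

def file_checksum(path: str, algorithm: str = "sha256") -> str:
    """Compute the checksum of a file, reading it in chunks."""
    h = hashlib.new(algorithm)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            h.update(chunk)
    return h.hexdigest()

# Placeholder values for illustration only.
downloaded_file = "modaris_setup.exe"
expected = "paste-the-checksum-published-by-the-source-here"

actual = file_checksum(downloaded_file, "md5")  # or "sha256"
print("OK" if actual.lower() == expected.lower() else "MISMATCH", actual)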

                    How to install Lectra Modaris full cracked version on Windows?

                    -

                    After you download Lectra Modaris full cracked version from a source and verify its authenticity and safety, you can proceed to install it on your Windows computer. There are different methods to install Lectra Modaris full cracked version on Windows, depending on the format and the installation mode of the file. However, in this article, we will focus on one method that uses an automatic installation mode.

                    -

                    An automatic installation mode is a method that allows you to install Lectra Modaris full cracked version on Windows without any user intervention or input. It uses an answer file that contains all the necessary information and settings for the installation process. It also uses a USB flash drive that contains all the files and folders for the installation process.

                    -

                    How to create an answer file for automatic installation

                    -

                    An answer file is a text file that contains all the parameters and values for installing Lectra Modaris full cracked version on Windows automatically. It has a .ini extension and a specific format and syntax. You can create an answer file using a text editor such as Notepad or Notepad++. Here are some steps that you can follow to create an answer file for automatic installation:

                    -
                      -
1. Open a text editor such as Notepad or Notepad++.
2. Type or copy and paste the following text into your text editor:

       [Modaris]
       InstallPath=C:\Lectra\ModarisV8R2
       LicensePath=C:\Lectra\ModarisV8R2\License
       [Modaris3D]
       InstallPath=C:\Lectra\ModarisV8R2
       LicensePath=C:\Lectra\ModarisV8R2\License
       [Modaservice]
       InstallPath=C:\Lectra\Modaservice
       [Modarun]
       InstallPath=C:\Lectra\Modarun
       [ModaServer]
       InstallPath=C:\Lectra\ModaServer
       [ModaClient]
       InstallPath=C:\Lectra\ModaClient
       [Options]
       Language=English

3. Save your text file as modainstall.ini on your USB flash drive.
                    -

                    This is an example of an answer file for installing Lectra Modaris V8R2 full cracked version on Windows automatically. You can modify some of the parameters and values according to your preferences and needs. For example, you can change the InstallPath to specify a different installation folder for Lectra Modaris. You can also change the Language to specify a different language for Lectra Modaris.
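If you would rather generate the file than type it, here is a short Python sketch using the standard configparser module. The sections and keys simply mirror the example above; whether the installer accepts a file generated this way is an assumption, not something verified here:

```python
# Hypothetical helper that writes the modainstall.ini answer file shown
# above. The sections and keys mirror that example; the installer is
# assumed (not verified) to accept a file laid out this way.
import configparser

settings = {
    "Modaris":     {"InstallPath": r"C:\Lectra\ModarisV8R2",
                    "LicensePath": r"C:\Lectra\ModarisV8R2\License"},
    "Modaris3D":   {"InstallPath": r"C:\Lectra\ModarisV8R2",
                    "LicensePath": r"C:\Lectra\ModarisV8R2\License"},
    "Modaservice": {"InstallPath": r"C:\Lectra\Modaservice"},
    "Modarun":     {"InstallPath": r"C:\Lectra\Modarun"},
    "ModaServer":  {"InstallPath": r"C:\Lectra\ModaServer"},
    "ModaClient":  {"InstallPath": r"C:\Lectra\ModaClient"},
    "Options":     {"Language": "English"},
}

config = configparser.ConfigParser()
config.optionxform = str  # preserve the exact key casing shown above
config.read_dict(settings)

with open("modainstall.ini", "w") as f:
    # Write "Key=Value" without spaces, matching the hand-written example.
    config.write(f, space_around_delimiters=False)
```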

                    -

                    How to use a USB flash drive for automatic installation

                    -

                    A USB flash drive is a portable storage device that can store and transfer files and folders between computers. You can use a USB flash drive to install Lectra Modaris full cracked version on Windows automatically by copying all the necessary files and folders from the downloaded file to the USB flash drive. Here are some steps that you can follow to use a USB flash drive for automatic installation:

                    -
                      -
1. Insert your USB flash drive into your computer.
2. Open the downloaded file of Lectra Modaris full cracked version using a file extractor such as WinRAR or 7-Zip.
3. Extract or copy all the files and folders from the downloaded file to your USB flash drive.
4. Make sure that your USB flash drive contains the following files and folders:
   • modainstall.ini (the answer file that you created)
   • setup.exe (the executable file that launches the installation process)
   • Lectra (a folder that contains all the subfolders and files for Lectra Modaris)
5. Eject your USB flash drive from your computer.
                    -

                    This is an example of how to use a USB flash drive for installing Lectra Modaris V8R2 full cracked version on Windows automatically. You can use a different version or format of Lectra Modaris full cracked version, but make sure that you have all the necessary files and folders for the installation process.
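As a quick sanity check before ejecting the drive, a few lines of Python can confirm the expected layout. The drive letter here is a placeholder; the three required names come from the list above:

```python
# Quick sanity check (a sketch; the layout follows the list above)
# that the USB drive contains everything the automatic install expects.
import os

usb_root = "E:\\"  # placeholder drive letter for the USB stick
required = ["modainstall.ini", "setup.exe", "Lectra"]

missing = [name for name in required
           if not os.path.exists(os.path.join(usb_root, name))]

if missing:
    print("Missing from the USB drive:", ", ".join(missing))
else:
    print("All required files and folders are present.")
```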

                    -

                    How to troubleshoot problems with automatic installation

                    -

                    Sometimes, you may encounter some problems or errors with the automatic installation of Lectra Modaris full cracked version on Windows. These problems or errors may be caused by various factors, such as incompatible hardware or software, corrupted files or folders, missing drivers or components, etc. Here are some tips and solutions that you can try to troubleshoot problems with automatic installation:

                    -
                      -
• Make sure that your computer meets the minimum requirements and compatibility of Lectra Modaris.
• Make sure that your USB flash drive has enough space and is formatted correctly.
• Make sure that your answer file is valid and has the correct format and syntax.
• Make sure that your downloaded file is authentic and safe, and that you have extracted or copied all the files and folders correctly.
• Make sure that you have disabled or closed any antivirus or anti-malware software that may interfere with the installation process.
• Make sure that you have run the setup.exe file as an administrator.
• Make sure that you have followed the instructions and prompts on the screen during the installation process.
• If you still have problems or errors with the automatic installation, you can try to install Lectra Modaris full cracked version manually by following the steps in the readme.txt file or the video tutorial provided by the source or link.
                    -

                    Conclusion

                    -

Lectra Modaris is a powerful and versatile patternmaking program for the fashion industry. It allows you to create, modify, grade, and simulate patterns and prototypes with ease and accuracy. However, Lectra Modaris is not free software. It requires a license and a subscription plan to use it.

                    -

                    If you want to use Lectra Modaris without paying for it, you might be interested in using a cracked version of the software. A cracked version of Lectra Modaris is a modified version of the software that bypasses the license and activation process. It allows you to use Lectra Modaris without any limitations or restrictions.

                    -

                    However, using a cracked version of Lectra Modaris has its advantages and disadvantages, as well as its risks and precautions. You need to be careful and cautious when downloading, installing, and using a cracked version of Lectra Modaris. You also need to be aware of the legal and ethical implications of using pirated software.

                    -

                    In this article, we have shown you how to install Lectra Modaris full cracked version on Windows using an automatic installation method. We have also discussed the pros and cons of using a cracked version, as well as the risks and precautions involved. We hope that this article has been helpful and informative for you.

                    -

                    FAQs

                    -

                    Here are some frequently asked questions about Lectra Modaris full cracked version:

                    -
                      -
1. What is the difference between Lectra Modaris V8R1 and V8R2?

                      Lectra Modaris V8R1 and V8R2 are two versions of Lectra Modaris software. Lectra Modaris V8R2 is an updated version of Lectra Modaris V8R1 that has some new features and improvements, such as:

                      -
                        -
• It supports the Windows 10 operating system.
• It has a new user interface that is more intuitive and user-friendly.
• It has a new 3D simulation engine that is faster and more realistic.
• It has a new 3D fitting module that allows you to adjust the fit of your garments on different mannequins and fabrics.
• It has a new 3D flattening module that allows you to convert your 3D models into 2D patterns.
• It has a new 3D printing module that allows you to print your 3D models in physical form.
                      -

                      Lectra Modaris V8R2 is compatible with Lectra Modaris V8R1, but not with older versions of Lectra Modaris.

                      -
2. How can I get a license and a subscription plan for Lectra Modaris?

                      If you want to use Lectra Modaris legally and ethically, you need to get a license and a subscription plan from Lectra. You can contact Lectra directly or through their authorized distributors or resellers. You can also visit their website or their social media pages to get more information and details about their products and services.

                      -
3. How can I learn how to use Lectra Modaris?

                      If you want to learn how to use Lectra Modaris, you have several options and resources available. You can:

                      -
                        -
• Watch the video tutorials and webinars that Lectra provides on their website or their YouTube channel.
• Read the user manuals and guides that Lectra provides on their website or their online help center.
• Enroll in the online courses and certifications that Lectra offers on their website or their e-learning platform.
• Join the online forums and communities that Lectra hosts or participates in, such as Lectra Fashion Network, Lectra User Group, etc.
• Attend the live events and workshops that Lectra organizes or sponsors, such as Lectra Fashion Forum, Lectra Fashion Tech Days, etc.

                      \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Metatrader4expertadvisorcracked Fixed.md b/spaces/tioseFevbu/cartoon-converter/scripts/Metatrader4expertadvisorcracked Fixed.md deleted file mode 100644 index 612e995aaf3ad2ab82f44312d426a2fb3c762f65..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Metatrader4expertadvisorcracked Fixed.md +++ /dev/null @@ -1,228 +0,0 @@ -
                      -
| Section | Points |
| --- | --- |
| Introduction | What are EAs and why use them?<br>What are the benefits and risks of EAs?<br>How to install and run MT4? |
| Creating an EA | How to use the MetaEditor and the MQL4 language?<br>How to create a simple EA based on a MACD indicator?<br>How to compile and test your EA in the Strategy Tester? |
| Using an EA | How to import an EA from the MT4 market or other sources?<br>How to set up and launch an EA on a chart?<br>How to monitor and manage your EA's performance? |
| Examples of EAs | What are some of the best EAs for MT4 available for free or for purchase?<br>What are some of the features and strategies of these EAs?<br>How to evaluate and compare different EAs? |
| Conclusion | What are the main takeaways from this article?<br>What are some of the best practices and tips for using EAs in MT4?<br>Where to find more resources and information on EAs and MT4? |

## Article with HTML formatting

                      Introduction

                      -

                      If you are a forex trader, you might have heard of expert advisors (EAs), also known as forex robots. These are programs that can automate your trading strategies and execute trades on your behalf, using the MetaTrader 4 (MT4) platform. In this article, I will explain what EAs are, why you might want to use them, and how to create and use them in MT4.

                      -

                      What are EAs and why use them?

                      -

                      EAs are programs that run on the MT4 platform, using a programming language called MQL4. They can perform various tasks, such as:

                      -

                      metatrader4expertadvisorcracked


DOWNLOAD https://urlcod.com/2uHvLs



                      -
                        -
• Analyzing market data and indicators
• Generating trading signals and alerts
• Opening, modifying, and closing orders
• Managing risk and money management
• Optimizing trading performance
                      -

                      EAs can be customized to suit your trading style, preferences, and goals. You can either create your own EA using the MetaEditor tool, or import an existing EA from the MT4 market or other sources.

                      -

                      EAs can help you save time, avoid emotional trading, and optimize your performance. Some of the benefits of using EAs are:

                      -
                        -
• You don't have to monitor the market 24/7, as EAs can trade for you anytime and anywhere.
• You don't have to worry about missing trading opportunities, as EAs can scan multiple markets and timeframes simultaneously.
• You don't have to rely on your intuition or emotions, as EAs can follow your predefined rules and logic objectively.
• You don't have to manually calculate and execute trades, as EAs can do it for you with speed and accuracy.
• You don't have to repeat the same tasks over and over, as EAs can automate them for you efficiently.
                      -

                      What are the benefits and risks of EAs?

                      -

                      While EAs can offer many advantages, they also come with some drawbacks and risks. Some of the challenges and limitations of using EAs are:

                      -
                        -
• You need to have a reliable computer and internet connection, as EAs depend on them to function properly.
• You need to have a good understanding of forex trading, programming, and testing, as EAs require technical skills and knowledge to create and use.
• You need to have realistic expectations and goals, as EAs are not magic tools that can guarantee profits or eliminate losses.
• You need to monitor and update your EA regularly, as EAs can become outdated or malfunction due to changing market conditions or technical issues.
• You need to be aware of the potential scams or frauds that may sell faulty or malicious EAs online.
                      -

                      Therefore, before you decide to use an EA, you should do your research, test your EA thoroughly, and practice risk management.

                      -

How to install and run MT4?

                      -

                      To install and run MT4 on your Windows device, you need to follow these steps:

                      -
                        -
1. Download the MT4 installation file from the official website of your forex broker, or from the MetaQuotes website. You can also use the link below to download the file.
2. Run the installation file (mt4setup.exe) and agree to the terms of the license agreement. Then, choose a directory for installing the program and follow the installation wizard tips.
3. When the installation is completed, click the Finish button to exit. You should see the MT4 icon on your desktop or in your start menu.
4. Click on the MT4 icon to launch the program. You will see a window where you can choose a trading server. You can either use the default server provided by MetaQuotes, or enter the server details of your forex broker. Then, click Next.
5. Next, you will see a window where you can open a demo account or a live account. If you already have an account with your broker, you can enter your login and password and click Next. If you don't have an account, you can click on "Open a demo account" and fill in your personal details and preferences. Then, click Next.
6. Finally, you will see a window where you can review your account information and settings. You can also change your password if you want. Then, click Finish.
                      -

                      Congratulations! You have successfully installed and run MT4 on your Windows device. You can now start trading with your EA or manually.

                      -

                      Creating an EA

                      -

                      If you want to create your own EA, you need to use the MetaEditor tool and the MQL4 language. MetaEditor is an integrated development environment (IDE) that allows you to write, edit, compile, and debug your MQL4 code. MQL4 is a programming language that is based on C++ and is designed specifically for developing EAs, indicators, scripts, and libraries for MT4.

                      -

                      -

                      How to use the MetaEditor and the MQL4 language?

                      -

                      To use the MetaEditor and the MQL4 language, you need to follow these steps:

                      -
                        -
1. Open MT4 and click on Tools -> MetaQuotes Language Editor. This will launch the MetaEditor program.
2. In MetaEditor, you will see a toolbar with various buttons and menus. You can use them to create new files, open existing files, save files, compile files, debug files, and more.
3. To create a new EA file, click on File -> New -> Expert Advisor (template). This will open a wizard that will guide you through the process of creating an EA.
4. In the wizard, you will need to enter some basic information about your EA, such as its name, author, link, inputs, properties, etc. You can also choose a template for your EA from several options.
5. After you finish the wizard, you will see a new file with the extension .mq4 in MetaEditor. This is where you will write your MQL4 code for your EA.
6. To learn how to write MQL4 code, you can refer to the official documentation, which provides a comprehensive reference of all the functions, variables, constants, operators, data types, etc. that you can use in MQL4. You can also find many tutorials and examples online that can help you learn MQL4 syntax and logic.
                      -

                      How to create a simple EA based on a MACD indicator?

                      -

                      To illustrate how to create an EA in MQL4, let's create a simple EA that uses a MACD indicator to generate buy and sell signals. MACD stands for Moving Average Convergence Divergence and is one of the most popular technical indicators in forex trading. It measures the difference between two moving averages (usually 12-period and 26-period) and compares it with a signal line (usually 9-period). When the MACD crosses above the signal line, it indicates a bullish trend; when it crosses below the signal line, it indicates a bearish trend.
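Before writing any MQL4, it can help to see the crossover logic in plain code. Here is a short Python sketch using pandas with made-up closing prices; the 12/26/9 parameters match the ones used in this article:

```python
import pandas as pd

# Made-up closing prices for illustration; in practice these would come
# from your price feed or exported MT4 history.
close = pd.Series([1.1000, 1.1012, 1.1008, 1.1021, 1.1035, 1.1029,
                   1.1042, 1.1050, 1.1044, 1.1061, 1.1070, 1.1066] * 5)

ema12 = close.ewm(span=12, adjust=False).mean()
ema26 = close.ewm(span=26, adjust=False).mean()
macd = ema12 - ema26                            # MACD main line
signal = macd.ewm(span=9, adjust=False).mean()  # signal line

# Bullish crossover: MACD above the signal line now, at or below it before.
bullish = (macd > signal) & (macd.shift(1) <= signal.shift(1))
# Bearish crossover: the mirror condition.
bearish = (macd < signal) & (macd.shift(1) >= signal.shift(1))

print("bullish at bars:", list(close.index[bullish.to_numpy()]))
print("bearish at bars:", list(close.index[bearish.to_numpy()]))
```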

                      -

                      To create an EA based on MACD, we will use the following logic:

                      -
                        -
• We will use EUR/USD as our currency pair and H1 as our timeframe.
• We will use 0.01 as our lot size and 50 as our stop loss and take profit in pips.
• We will use 12, 26, and 9 as our MACD parameters.
• We will open a buy order when the MACD crosses above the signal line from below.
• We will open a sell order when the MACD crosses below the signal line from above.
• We will close a buy order when the MACD crosses below the signal line from above.
• We will close a sell order when the MACD crosses above the signal line from below.
                      -

                      To implement this logic in MQL4, we will use the following code:

    //+------------------------------------------------------------------+
    //| Expert initialization function                                   |
    //+------------------------------------------------------------------+
    int OnInit()
      {
       //--- create timer
       EventSetTimer(1); // 1 second
       return(INIT_SUCCEEDED);
      }
    //+------------------------------------------------------------------+
    //| Expert deinitialization function                                 |
    //+------------------------------------------------------------------+
    void OnDeinit(const int reason)
      {
       //--- destroy timer
       EventKillTimer();
      }
    //+------------------------------------------------------------------+
    //| Expert tick function                                             |
    //+------------------------------------------------------------------+
    void OnTick()
      {
       //--- get current prices
       double Bid=NormalizeDouble(SymbolInfoDouble(_Symbol,SYMBOL_BID),_Digits);
       double Ask=NormalizeDouble(SymbolInfoDouble(_Symbol,SYMBOL_ASK),_Digits);
       //--- get current and previous MACD values
       double macd_main       =iMACD(_Symbol,_Period,12,26,9,PRICE_CLOSE,MODE_MAIN,0);
       double macd_signal     =iMACD(_Symbol,_Period,12,26,9,PRICE_CLOSE,MODE_SIGNAL,0);
       double macd_main_prev  =iMACD(_Symbol,_Period,12,26,9,PRICE_CLOSE,MODE_MAIN,1);
       double macd_signal_prev=iMACD(_Symbol,_Period,12,26,9,PRICE_CLOSE,MODE_SIGNAL,1);
       //--- check for buy signal: MACD crosses above the signal line from below
       if(macd_main>macd_signal && macd_main_prev<=macd_signal_prev)
         {
          //--- open buy order
          int ticket=OrderSend(_Symbol,OP_BUY,0.01,Ask,10,Bid-50*Point,Bid+50*Point,"Buy Order",0,0,clrBlue);
          if(ticket<0)
             Print("Error opening buy order: ",GetLastError());
          else
             Print("Buy order opened successfully: ",ticket);
         }
       //--- check for sell signal: MACD crosses below the signal line from above
       if(macd_main<macd_signal && macd_main_prev>=macd_signal_prev)
         {
          //--- open sell order
          int ticket=OrderSend(_Symbol,OP_SELL,0.01,Bid,10,Ask+50*Point,Ask-50*Point,"Sell Order",0,0,clrRed);
          if(ticket<0)
             Print("Error opening sell order: ",GetLastError());
          else
             Print("Sell order opened successfully: ",ticket);
         }
       //--- loop through all open orders and close those with an opposite signal
       for(int i=OrdersTotal()-1;i>=0;i--)
         {
          if(!OrderSelect(i,SELECT_BY_POS,MODE_TRADES))
             continue;
          if(OrderSymbol()!=_Symbol)
             continue;
          //--- close a buy order when the MACD crosses below the signal line
          if(OrderType()==OP_BUY && macd_main<macd_signal && macd_main_prev>=macd_signal_prev)
             if(!OrderClose(OrderTicket(),OrderLots(),Bid,10,clrYellow))
                Print("Error closing buy order: ",GetLastError());
          //--- close a sell order when the MACD crosses above the signal line
          if(OrderType()==OP_SELL && macd_main>macd_signal && macd_main_prev<=macd_signal_prev)
             if(!OrderClose(OrderTicket(),OrderLots(),Ask,10,clrYellow))
                Print("Error closing sell order: ",GetLastError());
         }
      }

                      How to compile and test your EA in the Strategy Tester?

                      -

After you write your MQL4 code for your EA in MetaEditor, you need to compile it and test it in the Strategy Tester. The Strategy Tester is a tool that allows you to backtest your EA on historical data and optimize its parameters. To compile and test your EA in the Strategy Tester, you need to follow these steps:

                        -
1. In MetaEditor, click on the Compile button on the toolbar, or press F7. This will check your code for any errors or warnings and generate an executable file with the extension .ex4 in the same folder as your .mq4 file.
2. Open MT4 and click on View -> Strategy Tester. This will open the Strategy Tester panel at the bottom of the screen.
3. In the Strategy Tester panel, select your EA from the Expert Advisor drop-down menu. Then, select the currency pair, timeframe, model, spread, and date range that you want to test your EA on.
4. Click on the Start button to begin the backtesting process. You will see a progress bar and a graph showing the results of your EA. You can also switch to the Report, Graph, Results, or Journal tabs to see more details and statistics.
5. If you want to optimize your EA's parameters, you can click on the Expert properties button and go to the Testing tab. There, you can enter the values or ranges of values that you want to test for each input parameter of your EA. Then, go to the Optimization tab and select the optimization method and criteria that you want to use. Click OK and then Start to run the optimization process. You will see a list of all the tested combinations and their results. You can sort them by any column or filter them by any criterion. You can also switch to the Optimization Graph or Optimization Results tabs to see more visualizations and information.
                      -

                      Congratulations! You have successfully compiled and tested your EA in the Strategy Tester. You can now use your EA on a demo or live account.

                      -

                      Using an EA

                      -

                      If you want to use an existing EA, you need to import it into MT4 and set it up on a chart. You can either use an EA that you created yourself, or an EA that you downloaded from the MT4 market or other sources.

                      -

                      How to import an EA from the MT4 market or other sources?

                      -

                      To import an EA from the MT4 market or other sources, you need to follow these steps:

                      -
                        -
1. If you want to use an EA from the MT4 market, you need to open MT4 and click on Tools -> Options -> Community. There, you need to enter your MQL5.com login and password and click OK. Then, you can go to View -> Terminal -> Market and browse through the available EAs. You can also use the search function or filter by category, price, rating, etc. When you find an EA that you like, you can click on it and read its description, reviews, screenshots, etc. You can also test it on a demo account or rent it for a certain period of time. To purchase or rent an EA, you need to have enough funds in your MQL5.com account or use a payment method such as PayPal or a credit card.
2. If you want to use an EA from other sources, such as websites, forums, blogs, etc., you need to download the file with the extension .ex4 or .mq4 and save it in your MT4 data folder. To find your MT4 data folder, you can open MT4 and click on File -> Open Data Folder. Then, go to MQL4 -> Experts and paste your file there.
3. After you import your EA into MT4, you need to restart MT4 or refresh the Navigator panel by right-clicking on it and selecting Refresh. You should see your EA under the Expert Advisors category in the Navigator panel.
                      -

                      How to set up and launch an EA on a chart?

                      -

                      To set up and launch an EA on a chart, you need to follow these steps:

                      -
                        -
1. Open MT4 and select the currency pair and timeframe that you want to use for your EA.
2. Drag and drop your EA from the Navigator panel onto the chart. You will see a window where you can configure your EA's settings.
3. In the window, go to the Common tab and check or uncheck the options that you want to enable or disable for your EA. For example, you can enable "Allow live trading", "Allow DLL imports", "Allow import of external experts", etc.
4. Go to the Inputs tab and adjust the values of each input parameter of your EA according to your preferences and goals. You can also load or save a set of parameters by clicking on Load or Save.
5. Click OK to confirm your settings and attach your EA to the chart. You should see a smiley face icon at the top right corner of the chart, indicating that your EA is active and ready to trade.
                      -

                      Congratulations! You have successfully set up and launched your EA on a chart. You can now let your EA trade for you according to your settings and strategy.

                      -

                      How to monitor and manage your EA's performance?

                      -

                      To monitor and manage your EA's performance, you need to follow these steps:

                      -
                        -
1. Open MT4 and go to View -> Terminal. This will open the Terminal panel at the bottom of the screen.
2. In the Terminal panel, you can switch to different tabs to see different information and statistics about your EA's performance. For example, you can go to the Trade tab to see your current orders, balance, equity, margin, etc. You can also modify or close your orders manually by right-clicking on them and selecting the appropriate option.
3. You can go to the Account History tab to see your past orders, profits, losses, commissions, swaps, etc. You can also generate a detailed report of your trading history by right-clicking on any order and selecting Save as Report.
4. You can go to the Journal tab to see the log of your EA's actions, messages, errors, etc. You can also filter the log by date, time, priority, etc. by right-clicking on any entry and selecting Filter.
5. You can go to the Experts tab to see the output of your EA's print functions, as well as any warnings or errors that may occur during its execution. You can also filter the output by date, time, priority, etc. by right-clicking on any entry and selecting Filter.
6. You can go to the Graph tab to see a graphical representation of your EA's performance over time. You can also customize the graph by changing its scale, type, color, etc. by right-clicking on it and selecting Properties.
                      -

                      By monitoring and managing your EA's performance, you can evaluate its effectiveness, identify its strengths and weaknesses, and make any necessary adjustments or improvements.
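For a rough idea of what to look for in those reports, here is a small Python sketch that computes a few common summary statistics from a list of per-trade profits. The numbers are invented for illustration; in practice you would export them from the Account History tab:

```python
# A minimal sketch (hypothetical numbers, not from any real report) of the
# kind of summary statistics worth checking in the Account History report.
trades = [35.0, -12.5, 20.0, -8.0, 15.5, -22.0, 40.0]  # profit per trade

wins = [t for t in trades if t > 0]
losses = [t for t in trades if t < 0]

win_rate = len(wins) / len(trades)
profit_factor = sum(wins) / abs(sum(losses))  # gross profit / gross loss

# Maximum drawdown of the running balance.
balance, peak, max_dd = 0.0, 0.0, 0.0
for t in trades:
    balance += t
    peak = max(peak, balance)
    max_dd = max(max_dd, peak - balance)

print(f"win rate {win_rate:.0%}, profit factor {profit_factor:.2f}, "
      f"max drawdown {max_dd:.2f}")
```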

                      -

                      Examples of EAs

                      -

                      If you don't want to create your own EA, or if you want to try some different EAs, you can find many examples of EAs online. Some of them are free, while others are paid. Some of them are simple, while others are complex. Some of them are reliable, while others are risky. In this section, I will provide you with some examples of EAs that are available for MT4, as well as some of their features and strategies.

                      -

                      What are some of the best EAs for MT4 available for free or for purchase?

                      -

                      There are many EAs for MT4 available for free or for purchase online. However, not all of them are trustworthy or profitable. Therefore, you should always do your research before using any EA. You should also test it on a demo account before using it on a live account. Here are some examples of EAs that are popular and reputable among MT4 users:

| Name | Description | Price | Source |
| --- | --- | --- | --- |
| MACD Sample | This is an EA that comes pre-installed with MT4. It is based on the MACD indicator and uses a simple trend-following strategy. | Free | MT4 market |
| Moving Average | This is another EA that comes pre-installed with MT4. It is based on the Moving Average indicator and uses a simple crossover strategy. | Free | MT4 market |
| WallStreet Forex Robot 2.0 Evolution | This is a paid EA that claims to be one of the best-selling and most profitable EAs in the market. It uses a scalping strategy that works on 8 currency pairs and 5 timeframes. | $237 (one-time payment) | |
| Forex Diamond EA | This is another paid EA that claims to be one of the most advanced and intelligent EAs in the market. It uses a combination of trend-following, countertrend, and scalping strategies that work on 4 currency pairs and 3 timeframes. | $237 (one-time payment) | [Forex Diamond EA website] |
| Forex Fury | This is yet another paid EA that claims to be one of the most consistent and reliable EAs in the market. It uses a scalping strategy that works on 1 currency pair and 1 timeframe. | $229.99 (one-time payment) | [Forex Fury website] |
| Odin Forex Robot | This is a paid EA that claims to be one of the most powerful and profitable EAs in the market. It uses a grid trading strategy that works on multiple currency pairs and timeframes. | $129 (one-time payment) | [Odin Forex Robot website] |
                      -

                      These are just some examples of EAs that are available for MT4. There are many more EAs that you can find online, but you should always be careful and do your due diligence before using any of them. Remember, there is no such thing as a perfect EA that can guarantee success or avoid losses.

                      -

                      What are some of the features and strategies of these EAs?

                      -

                      Each EA has its own features and strategies that make it unique and different from others. Some of the common features and strategies that you may encounter when using EAs are:

                      • Indicators: Tools that help you analyze market data and trends, such as moving averages, MACD, RSI, etc. EAs can use indicators to generate trading signals and alerts based on certain conditions or rules.
                      • Oscillators: Indicators that measure the momentum and direction of the market, such as stochastic, CCI, ADX, etc. EAs can use oscillators to identify overbought or oversold situations and potential reversals.
                      • Trend-following: A strategy that follows the direction of the market, whether it is up, down, or sideways. EAs can use trend-following to enter trades in the direction of the trend and exit when the trend changes (a minimal crossover sketch follows this list).
                      • Countertrend: A strategy that goes against the direction of the market, betting on a reversal or a correction. EAs can use countertrend rules to enter trades when the market is overextended or deviates from its mean, and exit when it returns to its normal state.
                      • Scalping: A strategy that takes small profits from short-term price movements, usually lasting from seconds to minutes. EAs can use scalping to open and close trades quickly and frequently, taking advantage of small price fluctuations.
                      • Grid trading: A strategy that places multiple orders at different price levels, creating a grid-like pattern. EAs can use grid trading to profit from both trending and ranging markets, as well as to hedge against losses.
                      • Martingale: A strategy that doubles the position size after every loss, hoping to recover the losses and make a profit. EAs can use martingale to increase their lot size after every losing trade, until they hit a winning trade or reach their maximum risk limit.
                      • Hedging: A strategy that opens two opposite positions on the same or correlated instruments, reducing exposure to market risk. EAs can use hedging to balance their long and short positions, or to protect their profits or losses.
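
                      To make the trend-following idea concrete, here is a minimal MQL4 sketch of a moving-average crossover entry. The periods (10 and 50), the fixed 0.1 lot size, and the single-position check are illustrative assumptions; a real EA would also need exits, stop management, and money management:

                      ```mql4
                      // Illustrative trend-following entry: buy when the fast moving average
                      // crosses above the slow one. All parameter values are example choices.
                      void OnTick()
                      {
                         double fastMA   = iMA(Symbol(), 0, 10, 0, MODE_SMA, PRICE_CLOSE, 1);
                         double slowMA   = iMA(Symbol(), 0, 50, 0, MODE_SMA, PRICE_CLOSE, 1);
                         double fastPrev = iMA(Symbol(), 0, 10, 0, MODE_SMA, PRICE_CLOSE, 2);
                         double slowPrev = iMA(Symbol(), 0, 50, 0, MODE_SMA, PRICE_CLOSE, 2);

                         // Enter long on the bar after the fast average crosses above the
                         // slow one, but only if no position is currently open.
                         if(fastPrev <= slowPrev && fastMA > slowMA && OrdersTotal() == 0)
                         {
                            int ticket = OrderSend(Symbol(), OP_BUY, 0.1, Ask, 3, 0, 0,
                                                   "MA cross", 0, 0, clrGreen);
                            if(ticket < 0)
                               Print("OrderSend failed, error ", GetLastError());
                         }
                      }
                      ```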

                      These are just some examples of features and strategies that EAs can use. There are many more features and strategies that you can find online, but you should always understand how they work and what their pros and cons are before using them.

                      -

                      How to evaluate and compare different EAs?

                      -

                      To evaluate and compare different EAs, you need to use some criteria and metrics that can help you measure their performance and quality. Some of the common criteria and metrics that you can use are:

                      • Backtesting results: The results of testing an EA on historical data using the Strategy Tester. They show how the EA would have performed in the past under certain conditions and settings. Look at metrics such as profit factor, drawdown, win rate, return on investment, etc. (a profit-factor sketch follows this list).
                      • Forward testing results: The results of testing an EA on live or demo data using a real or simulated account. They show how the EA performs in the present under real market conditions and settings. Look at metrics such as balance, equity, profit, loss, number of trades, etc.
                      • Reviews and ratings: The feedback and opinions of other users who have used or purchased the EA. They show its reputation and credibility, as well as its strengths and weaknesses. Look at metrics such as stars, comments, likes, dislikes, etc.
                      • Features and strategies: The characteristics and methods that make the EA unique. They show its functionality and suitability for your trading style, preferences, and goals. Consider indicators, oscillators, trend-following, countertrend, scalping, grid trading, martingale, hedging, etc.
                      • Price and value: The costs and benefits of using or purchasing the EA. They show its affordability and profitability, as well as its return on investment. Consider whether it is free or paid, and whether it is offered as a one-time payment, a subscription, a rental, or a trial.
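
                      As an example of turning these metrics into numbers, here is a minimal MQL4 sketch that computes the profit factor (gross profit divided by gross loss) over the closed orders in the terminal's account history. Returning 0 when there are no losing trades is a simplifying assumption:

                      ```mql4
                      // Illustrative metric: profit factor = gross profit / gross loss,
                      // computed over closed market orders in the account history.
                      double ProfitFactor()
                      {
                         double grossProfit = 0.0, grossLoss = 0.0;
                         for(int i = 0; i < OrdersHistoryTotal(); i++)
                         {
                            if(!OrderSelect(i, SELECT_BY_POS, MODE_HISTORY)) continue;
                            if(OrderType() > OP_SELL) continue; // skip pending orders and balance entries
                            double p = OrderProfit() + OrderSwap() + OrderCommission();
                            if(p >= 0) grossProfit += p;
                            else       grossLoss   -= p;       // accumulate losses as a positive number
                         }
                         if(grossLoss == 0.0) return 0.0;      // undefined without losses; 0 as a sentinel
                         return grossProfit / grossLoss;
                      }
                      ```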

                      To evaluate and compare different EAs, you can use a table or a chart to display the criteria and metrics that you want to use. For example, you can use a table like this:

                      | Name | Backtesting results | Forward testing results | Reviews and ratings | Features and strategies | Price and value |
                      |---|---|---|---|---|---|
                      | EA 1 | Profit factor: 1.5; Drawdown: 10%; Win rate: 60% | Balance: $1000; Equity: $900; Profit: $100 | Stars: 4/5; Comments: Positive; Likes: 100 | Indicators: MACD; Oscillators: Stochastic; Trend-following: Yes | Paid; One-time payment; $99 |
                      | EA 2 | Profit factor: 2.0; Drawdown: 5%; Win rate: 80% | Balance: $2000; Equity: $1900; Profit: $200 | Stars: 5/5; Comments: Positive; Likes: 200 | Indicators: Moving Average; Oscillators: CCI; Countertrend: Yes | Paid; Subscription; $49/month |
                      | EA 3 | Profit factor: 0.5 | | | | |
                      -

                      You can also use a chart like this:

                      ![Chart](https://i.imgur.com/7y6lZQ3.png)

                      To evaluate and compare different EAs, use multiple criteria and metrics that are relevant to your trading objectives, and test the EAs on different data sets and time periods to see how they perform under various scenarios. Also stay aware of the limitations and risks of using EAs, such as technical issues, market changes, and human error.

                      -

                      Conclusion

                      -

                      In this article, I have explained what EAs are, why you might want to use them, and how to create and use them in MT4. I have also provided you with some examples and tips on how to evaluate and compare different EAs.

                      -

                      EAs are programs that can automate your trading strategies and execute trades on your behalf using the MT4 platform. They can help you save time, avoid emotional trading, and optimize your performance. However, they also come with some drawbacks and risks that you should be aware of.

                      -

                      To create an EA in MT4, you need to use the MetaEditor tool and the MQL4 language. To use an EA in MT4, you need to import it into MT4 and set it up on a chart. You can either use an EA that you created yourself, or one that you downloaded from the MT4 market or other sources. You can monitor and manage your EA's performance using the Terminal panel and the Strategy Tester. To evaluate and compare different EAs, use criteria and metrics that measure their performance and quality, display them in a table or a chart, and test the EAs on different data sets and time periods to see how they perform under various scenarios. I hope this article has been helpful and informative for you. If you have any questions or comments, please feel free to leave them below. Thank you for reading and happy trading!

                      FAQs

                      -

                      What is MetaTrader 4 (MT4)?

                      -

                      MetaTrader 4 (MT4) is a trading platform that allows you to trade forex, CFDs, futures, and other instruments online. It is one of the most popular and widely used trading platforms in the world. It offers various features and tools, such as charts, indicators, scripts, EAs, etc., that can help you analyze the market and execute trades.

                      -

                      What is an expert advisor (EA)?

                      -

                      An expert advisor (EA) is a program that can automate your trading strategies and execute trades on your behalf using the MT4 platform. It is also known as a forex robot or a trading bot. It can perform various tasks, such as analyzing market data and indicators, generating trading signals and alerts, opening, modifying, and closing orders, managing risk and money management, optimizing trading performance, etc.

                      -

                      How to create an EA in MT4?

                      -

                      To create an EA in MT4, you need to use the MetaEditor tool and the MQL4 language. MetaEditor is an integrated development environment (IDE) that allows you to write, edit, compile, and debug your MQL4 code. MQL4 is a programming language that is based on C++ and is designed specifically for developing EAs, indicators, scripts, and libraries for MT4.
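
                      For illustration, this is roughly the skeleton that MetaEditor generates for a new EA. The three handlers below are the standard MQL4 event entry points; the comments are placeholders rather than working logic:

                      ```mql4
                      // Minimal MQL4 EA skeleton, as produced by MetaEditor's wizard.
                      int OnInit()
                      {
                         // One-time setup (input validation, indicator preparation) goes here.
                         return(INIT_SUCCEEDED);
                      }

                      void OnDeinit(const int reason)
                      {
                         // Cleanup when the EA is removed from the chart or the terminal closes.
                      }

                      void OnTick()
                      {
                         // Trading logic runs on every incoming price tick.
                      }
                      ```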

                      -

                      How to use an EA in MT4?

                      -

                      To use an EA in MT4, you need to import it into MT4 and set it up on a chart. You can either use an EA that you created yourself, or an EA that you downloaded from the MT4 market or other sources. You can also monitor and manage your EA's performance using the Terminal panel and the Strategy Tester.

                      -

                      How to evaluate and compare different EAs?

                      -

                      To evaluate and compare different EAs, you need to use some criteria and metrics that can help you measure their performance and quality. You can also use a table or a chart to display the criteria and metrics that you want to use. You should also test different EAs on different data sets and time periods to see how they perform under various scenarios.

                      \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/build_meta.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/build_meta.py deleted file mode 100644 index a0d46a7989ad9a3e6fa45a3014d47f765ef357eb..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/build_meta.py +++ /dev/null @@ -1,304 +0,0 @@ -"""A PEP 517 interface to setuptools - -Previously, when a user or a command line tool (let's call it a "frontend") -needed to make a request of setuptools to take a certain action, for -example, generating a list of installation requirements, the frontend would -would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line. - -PEP 517 defines a different method of interfacing with setuptools. Rather -than calling "setup.py" directly, the frontend should: - - 1. Set the current directory to the directory with a setup.py file - 2. Import this module into a safe python interpreter (one in which - setuptools can potentially set global variables or crash hard). - 3. Call one of the functions defined in PEP 517. - -What each function does is defined in PEP 517. However, here is a "casual" -definition of the functions (this definition should not be relied on for -bug reports or API stability): - - - `build_wheel`: build a wheel in the folder and return the basename - - `get_requires_for_build_wheel`: get the `setup_requires` to build - - `prepare_metadata_for_build_wheel`: get the `install_requires` - - `build_sdist`: build an sdist in the folder and return the basename - - `get_requires_for_build_sdist`: get the `setup_requires` to build - -Again, this is not a formal definition! Just a "taste" of the module. -""" - -import io -import os -import sys -import tokenize -import shutil -import contextlib -import tempfile -import warnings - -import setuptools -import distutils -from ._reqs import parse_strings -from .extern.more_itertools import always_iterable - - -__all__ = ['get_requires_for_build_sdist', - 'get_requires_for_build_wheel', - 'prepare_metadata_for_build_wheel', - 'build_wheel', - 'build_sdist', - '__legacy__', - 'SetupRequirementsError'] - - -class SetupRequirementsError(BaseException): - def __init__(self, specifiers): - self.specifiers = specifiers - - -class Distribution(setuptools.dist.Distribution): - def fetch_build_eggs(self, specifiers): - specifier_list = list(parse_strings(specifiers)) - - raise SetupRequirementsError(specifier_list) - - @classmethod - @contextlib.contextmanager - def patch(cls): - """ - Replace - distutils.dist.Distribution with this class - for the duration of this context. - """ - orig = distutils.core.Distribution - distutils.core.Distribution = cls - try: - yield - finally: - distutils.core.Distribution = orig - - -@contextlib.contextmanager -def no_install_setup_requires(): - """Temporarily disable installing setup_requires - - Under PEP 517, the backend reports build dependencies to the frontend, - and the frontend is responsible for ensuring they're installed. - So setuptools (acting as a backend) should not try to install them. 
- """ - orig = setuptools._install_setup_requires - setuptools._install_setup_requires = lambda attrs: None - try: - yield - finally: - setuptools._install_setup_requires = orig - - -def _get_immediate_subdirectories(a_dir): - return [name for name in os.listdir(a_dir) - if os.path.isdir(os.path.join(a_dir, name))] - - -def _file_with_extension(directory, extension): - matching = ( - f for f in os.listdir(directory) - if f.endswith(extension) - ) - try: - file, = matching - except ValueError: - raise ValueError( - 'No distribution was found. Ensure that `setup.py` ' - 'is not empty and that it calls `setup()`.') - return file - - -def _open_setup_script(setup_script): - if not os.path.exists(setup_script): - # Supply a default setup.py - return io.StringIO(u"from setuptools import setup; setup()") - - return getattr(tokenize, 'open', open)(setup_script) - - -@contextlib.contextmanager -def suppress_known_deprecation(): - with warnings.catch_warnings(): - warnings.filterwarnings('ignore', 'setup.py install is deprecated') - yield - - -class _BuildMetaBackend: - - @staticmethod - def _fix_config(config_settings): - """ - Ensure config settings meet certain expectations. - - >>> fc = _BuildMetaBackend._fix_config - >>> fc(None) - {'--global-option': []} - >>> fc({}) - {'--global-option': []} - >>> fc({'--global-option': 'foo'}) - {'--global-option': ['foo']} - >>> fc({'--global-option': ['foo']}) - {'--global-option': ['foo']} - """ - config_settings = config_settings or {} - config_settings['--global-option'] = list(always_iterable( - config_settings.get('--global-option'))) - return config_settings - - def _get_build_requires(self, config_settings, requirements): - config_settings = self._fix_config(config_settings) - - sys.argv = sys.argv[:1] + ['egg_info'] + \ - config_settings["--global-option"] - try: - with Distribution.patch(): - self.run_setup() - except SetupRequirementsError as e: - requirements += e.specifiers - - return requirements - - def run_setup(self, setup_script='setup.py'): - # Note that we can reuse our build directory between calls - # Correctness comes first, then optimization later - __file__ = setup_script - __name__ = '__main__' - - with _open_setup_script(__file__) as f: - code = f.read().replace(r'\r\n', r'\n') - - exec(code, locals()) - - def get_requires_for_build_wheel(self, config_settings=None): - return self._get_build_requires( - config_settings, requirements=['wheel']) - - def get_requires_for_build_sdist(self, config_settings=None): - return self._get_build_requires(config_settings, requirements=[]) - - def prepare_metadata_for_build_wheel(self, metadata_directory, - config_settings=None): - sys.argv = sys.argv[:1] + [ - 'dist_info', '--egg-base', metadata_directory] - with no_install_setup_requires(): - self.run_setup() - - dist_info_directory = metadata_directory - while True: - dist_infos = [f for f in os.listdir(dist_info_directory) - if f.endswith('.dist-info')] - - if ( - len(dist_infos) == 0 and - len(_get_immediate_subdirectories(dist_info_directory)) == 1 - ): - - dist_info_directory = os.path.join( - dist_info_directory, os.listdir(dist_info_directory)[0]) - continue - - assert len(dist_infos) == 1 - break - - # PEP 517 requires that the .dist-info directory be placed in the - # metadata_directory. 
To comply, we MUST copy the directory to the root - if dist_info_directory != metadata_directory: - shutil.move( - os.path.join(dist_info_directory, dist_infos[0]), - metadata_directory) - shutil.rmtree(dist_info_directory, ignore_errors=True) - - return dist_infos[0] - - def _build_with_temp_dir(self, setup_command, result_extension, - result_directory, config_settings): - config_settings = self._fix_config(config_settings) - result_directory = os.path.abspath(result_directory) - - # Build in a temporary directory, then copy to the target. - os.makedirs(result_directory, exist_ok=True) - with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir: - sys.argv = (sys.argv[:1] + setup_command + - ['--dist-dir', tmp_dist_dir] + - config_settings["--global-option"]) - with no_install_setup_requires(): - self.run_setup() - - result_basename = _file_with_extension( - tmp_dist_dir, result_extension) - result_path = os.path.join(result_directory, result_basename) - if os.path.exists(result_path): - # os.rename will fail overwriting on non-Unix. - os.remove(result_path) - os.rename(os.path.join(tmp_dist_dir, result_basename), result_path) - - return result_basename - - def build_wheel(self, wheel_directory, config_settings=None, - metadata_directory=None): - with suppress_known_deprecation(): - return self._build_with_temp_dir(['bdist_wheel'], '.whl', - wheel_directory, config_settings) - - def build_sdist(self, sdist_directory, config_settings=None): - return self._build_with_temp_dir(['sdist', '--formats', 'gztar'], - '.tar.gz', sdist_directory, - config_settings) - - -class _BuildMetaLegacyBackend(_BuildMetaBackend): - """Compatibility backend for setuptools - - This is a version of setuptools.build_meta that endeavors - to maintain backwards - compatibility with pre-PEP 517 modes of invocation. It - exists as a temporary - bridge between the old packaging mechanism and the new - packaging mechanism, - and will eventually be removed. - """ - def run_setup(self, setup_script='setup.py'): - # In order to maintain compatibility with scripts assuming that - # the setup.py script is in a directory on the PYTHONPATH, inject - # '' into sys.path. (pypa/setuptools#1642) - sys_path = list(sys.path) # Save the original path - - script_dir = os.path.dirname(os.path.abspath(setup_script)) - if script_dir not in sys.path: - sys.path.insert(0, script_dir) - - # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to - # get the directory of the source code. They expect it to refer to the - # setup.py script. - sys_argv_0 = sys.argv[0] - sys.argv[0] = setup_script - - try: - super(_BuildMetaLegacyBackend, - self).run_setup(setup_script=setup_script) - finally: - # While PEP 517 frontends should be calling each hook in a fresh - # subprocess according to the standard (and thus it should not be - # strictly necessary to restore the old sys.path), we'll restore - # the original path so that the path manipulation does not persist - # within the hook after run_setup is called. 
- sys.path[:] = sys_path - sys.argv[0] = sys_argv_0 - - -# The primary backend -_BACKEND = _BuildMetaBackend() - -get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel -get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist -prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel -build_wheel = _BACKEND.build_wheel -build_sdist = _BACKEND.build_sdist - - -# The legacy backend -__legacy__ = _BuildMetaLegacyBackend() diff --git a/spaces/tomandandy/MusicGen3/tests/modules/test_lstm.py b/spaces/tomandandy/MusicGen3/tests/modules/test_lstm.py deleted file mode 100644 index 1248964c8191e19f27661f0974bef9cc967eb015..0000000000000000000000000000000000000000 --- a/spaces/tomandandy/MusicGen3/tests/modules/test_lstm.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import random -import torch - -from audiocraft.modules.lstm import StreamableLSTM - - -class TestStreamableLSTM: - - def test_lstm(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=False) - x = torch.randn(B, C, T) - y = lstm(x) - - print(y.shape) - assert y.shape == torch.Size([B, C, T]) - - def test_lstm_skip(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=True) - x = torch.randn(B, C, T) - y = lstm(x) - - assert y.shape == torch.Size([B, C, T]) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py deleted file mode 100644 index 2136255464715bcee89b47f1437a9dd4040e04c7..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pafpn/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pafpn/README.md deleted file mode 100644 index 3ddd451c1fcfc8b6d292490d2887d18b4d9cbbdb..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/pafpn/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# Path Aggregation Network for Instance Segmentation - -## Introduction - - - -``` -@inproceedings{liu2018path, - author = {Shu Liu and - Lu Qi and - Haifang Qin and - Jianping Shi and - Jiaya Jia}, - title = {Path Aggregation Network for Instance Segmentation}, - booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, - year = {2018} -} -``` - -## Results and Models - -## Results and Models - -| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -|:-------------:|:----------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:| -| R-50-FPN | pytorch | 1x | 4.0 | 17.2 | 37.5 | | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_20200503_105836.log.json) | diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sabl/README.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sabl/README.md deleted file mode 100644 index bb612a5f7b49469d8ea5f5d4e3bdd91991f4b461..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/sabl/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Side-Aware Boundary Localization for More Precise Object Detection - -## Introduction - - - -We provide config files to reproduce the object detection results in the ECCV 2020 Spotlight paper for [Side-Aware Boundary Localization for More Precise Object Detection](https://arxiv.org/abs/1912.04260). - -```latex -@inproceedings{Wang_2020_ECCV, - title = {Side-Aware Boundary Localization for More Precise Object Detection}, - author = {Jiaqi Wang and Wenwei Zhang and Yuhang Cao and Kai Chen and Jiangmiao Pang and Tao Gong and Jianping Shi and Chen Change Loy and Dahua Lin}, - booktitle = {ECCV}, - year = {2020} -} -``` - -## Results and Models - -The results on COCO 2017 val is shown in the below table. (results on test-dev are usually slightly higher than val). -Single-scale testing (1333x800) is adopted in all results. - -| Method | Backbone | Lr schd | ms-train | box AP | Config | Download | -| :----------------: | :-------: | :-----: | :------: | :----: | :----------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| SABL Faster R-CNN | R-50-FPN | 1x | N | 39.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_faster_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/sabl_faster_rcnn_r50_fpn_1x_coco-e867595b.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r50_fpn_1x_coco/20200830_130324.log.json) | -| SABL Faster R-CNN | R-101-FPN | 1x | N | 41.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_faster_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/sabl_faster_rcnn_r101_fpn_1x_coco-f804c6c1.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_faster_rcnn_r101_fpn_1x_coco/20200830_183949.log.json) | -| SABL Cascade R-CNN | R-50-FPN | 1x | N | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/sabl_cascade_rcnn_r50_fpn_1x_coco-e1748e5e.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r50_fpn_1x_coco/20200831_033726.log.json) | -| SABL Cascade R-CNN | R-101-FPN | 1x | N | 43.0 | 
[config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/sabl_cascade_rcnn_r101_fpn_1x_coco-2b83e87c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco/20200831_141745.log.json) | - -| Method | Backbone | GN | Lr schd | ms-train | box AP | Config | Download | -| :------------: | :-------: | :---: | :-----: | :---------: | :----: | :---------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| SABL RetinaNet | R-50-FPN | N | 1x | N | 37.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/sabl_retinanet_r50_fpn_1x_coco-6c54fd4f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_1x_coco/20200830_053451.log.json) | -| SABL RetinaNet | R-50-FPN | Y | 1x | N | 38.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r50_fpn_gn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/sabl_retinanet_r50_fpn_gn_1x_coco-e16dfcf1.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r50_fpn_gn_1x_coco/20200831_141955.log.json) | -| SABL RetinaNet | R-101-FPN | N | 1x | N | 39.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/sabl_retinanet_r101_fpn_1x_coco-42026904.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_1x_coco/20200831_034256.log.json) | -| SABL RetinaNet | R-101-FPN | Y | 1x | N | 40.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/sabl_retinanet_r101_fpn_gn_1x_coco-40a893e8.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_1x_coco/20200830_201422.log.json) | -| SABL RetinaNet | R-101-FPN | Y | 2x | Y (640~800) | 42.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco-1e63382c.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco/20200830_144807.log.json) | -| SABL RetinaNet | R-101-FPN | Y | 2x | Y (480~960) | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco-5342f857.pth) | 
[log](http://download.openmmlab.com/mmdetection/v2.0/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco/20200830_164537.log.json) | diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/detectors/deformable_detr.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/detectors/deformable_detr.py deleted file mode 100644 index 947550fb0ba3976308d9acc95c8d77b07e9dd423..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/detectors/deformable_detr.py +++ /dev/null @@ -1,9 +0,0 @@ -from ..builder import DETECTORS -from .detr import DETR - - -@DETECTORS.register_module() -class DeformableDETR(DETR): - - def __init__(self, *args, **kwargs): - super(DETR, self).__init__(*args, **kwargs) diff --git a/spaces/tonyassi/video-face-swap/DeepFakeAI/uis/components/__init__.py b/spaces/tonyassi/video-face-swap/DeepFakeAI/uis/components/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/trttung1610/musicgen/audiocraft/adversarial/discriminators/msstftd.py b/spaces/trttung1610/musicgen/audiocraft/adversarial/discriminators/msstftd.py deleted file mode 100644 index 81a9100961c7a89a39df2643b24268fb90bfeaa4..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/adversarial/discriminators/msstftd.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -import torchaudio -import torch -from torch import nn -from einops import rearrange - -from ...modules import NormConv2d -from .base import MultiDiscriminator, MultiDiscriminatorOutputType - - -def get_2d_padding(kernel_size: tp.Tuple[int, int], dilation: tp.Tuple[int, int] = (1, 1)): - return (((kernel_size[0] - 1) * dilation[0]) // 2, ((kernel_size[1] - 1) * dilation[1]) // 2) - - -class DiscriminatorSTFT(nn.Module): - """STFT sub-discriminator. - - Args: - filters (int): Number of filters in convolutions. - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - n_fft (int): Size of FFT for each scale. - hop_length (int): Length of hop between STFT windows for each scale. - kernel_size (tuple of int): Inner Conv2d kernel sizes. - stride (tuple of int): Inner Conv2d strides. - dilations (list of int): Inner Conv2d dilation on the time dimension. - win_length (int): Window size for each scale. - normalized (bool): Whether to normalize by magnitude after stft. - norm (str): Normalization method. - activation (str): Activation function. - activation_params (dict): Parameters to provide to the activation function. - growth (int): Growth factor for the filters. 
- """ - def __init__(self, filters: int, in_channels: int = 1, out_channels: int = 1, - n_fft: int = 1024, hop_length: int = 256, win_length: int = 1024, max_filters: int = 1024, - filters_scale: int = 1, kernel_size: tp.Tuple[int, int] = (3, 9), dilations: tp.List = [1, 2, 4], - stride: tp.Tuple[int, int] = (1, 2), normalized: bool = True, norm: str = 'weight_norm', - activation: str = 'LeakyReLU', activation_params: dict = {'negative_slope': 0.2}): - super().__init__() - assert len(kernel_size) == 2 - assert len(stride) == 2 - self.filters = filters - self.in_channels = in_channels - self.out_channels = out_channels - self.n_fft = n_fft - self.hop_length = hop_length - self.win_length = win_length - self.normalized = normalized - self.activation = getattr(torch.nn, activation)(**activation_params) - self.spec_transform = torchaudio.transforms.Spectrogram( - n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window_fn=torch.hann_window, - normalized=self.normalized, center=False, pad_mode=None, power=None) - spec_channels = 2 * self.in_channels - self.convs = nn.ModuleList() - self.convs.append( - NormConv2d(spec_channels, self.filters, kernel_size=kernel_size, padding=get_2d_padding(kernel_size)) - ) - in_chs = min(filters_scale * self.filters, max_filters) - for i, dilation in enumerate(dilations): - out_chs = min((filters_scale ** (i + 1)) * self.filters, max_filters) - self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, - dilation=(dilation, 1), padding=get_2d_padding(kernel_size, (dilation, 1)), - norm=norm)) - in_chs = out_chs - out_chs = min((filters_scale ** (len(dilations) + 1)) * self.filters, max_filters) - self.convs.append(NormConv2d(in_chs, out_chs, kernel_size=(kernel_size[0], kernel_size[0]), - padding=get_2d_padding((kernel_size[0], kernel_size[0])), - norm=norm)) - self.conv_post = NormConv2d(out_chs, self.out_channels, - kernel_size=(kernel_size[0], kernel_size[0]), - padding=get_2d_padding((kernel_size[0], kernel_size[0])), - norm=norm) - - def forward(self, x: torch.Tensor): - fmap = [] - z = self.spec_transform(x) # [B, 2, Freq, Frames, 2] - z = torch.cat([z.real, z.imag], dim=1) - z = rearrange(z, 'b c w t -> b c t w') - for i, layer in enumerate(self.convs): - z = layer(z) - z = self.activation(z) - fmap.append(z) - z = self.conv_post(z) - return z, fmap - - -class MultiScaleSTFTDiscriminator(MultiDiscriminator): - """Multi-Scale STFT (MS-STFT) discriminator. - - Args: - filters (int): Number of filters in convolutions. - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - sep_channels (bool): Separate channels to distinct samples for stereo support. - n_ffts (Sequence[int]): Size of FFT for each scale. - hop_lengths (Sequence[int]): Length of hop between STFT windows for each scale. - win_lengths (Sequence[int]): Window size for each scale. - **kwargs: Additional args for STFTDiscriminator. 
- """ - def __init__(self, filters: int, in_channels: int = 1, out_channels: int = 1, sep_channels: bool = False, - n_ffts: tp.List[int] = [1024, 2048, 512], hop_lengths: tp.List[int] = [256, 512, 128], - win_lengths: tp.List[int] = [1024, 2048, 512], **kwargs): - super().__init__() - assert len(n_ffts) == len(hop_lengths) == len(win_lengths) - self.sep_channels = sep_channels - self.discriminators = nn.ModuleList([ - DiscriminatorSTFT(filters, in_channels=in_channels, out_channels=out_channels, - n_fft=n_ffts[i], win_length=win_lengths[i], hop_length=hop_lengths[i], **kwargs) - for i in range(len(n_ffts)) - ]) - - @property - def num_discriminators(self): - return len(self.discriminators) - - def _separate_channels(self, x: torch.Tensor) -> torch.Tensor: - B, C, T = x.shape - return x.view(-1, 1, T) - - def forward(self, x: torch.Tensor) -> MultiDiscriminatorOutputType: - logits = [] - fmaps = [] - for disc in self.discriminators: - logit, fmap = disc(x) - logits.append(logit) - fmaps.append(fmap) - return logits, fmaps diff --git a/spaces/trttung1610/musicgen/audiocraft/metrics/rvm.py b/spaces/trttung1610/musicgen/audiocraft/metrics/rvm.py deleted file mode 100644 index 028324529531dd7ee97210dfd890fed717447be0..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/metrics/rvm.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp -import torch -from torch import nn -import torchaudio - - -def db_to_scale(volume: tp.Union[float, torch.Tensor]): - return 10 ** (volume / 20) - - -def scale_to_db(scale: torch.Tensor, min_volume: float = -120): - min_scale = db_to_scale(min_volume) - return 20 * torch.log10(scale.clamp(min=min_scale)) - - -class RelativeVolumeMel(nn.Module): - """Relative volume melspectrogram measure. - - Computes a measure of distance over two mel spectrogram that is interpretable in terms - of decibels. Given `x_ref` and `x_est` two waveforms of shape `[*, T]`, it will - first renormalize both by the ground truth of `x_ref`. - - Then it computes the mel spectrogram `z_ref` and `z_est` and compute volume of the difference - relative to the volume of `z_ref` for each time-frequency bin. It further adds some limits, e.g. - clamping the values between -25 and 25 dB (controlled by `min_relative_volume` and `max_relative_volume`) - with the goal of avoiding the loss being dominated by parts where the reference is almost silent. - Indeed, volumes in dB can take unbounded values both towards -oo and +oo, which can make the final - average metric harder to interpret. Besides, anything below -30 dB of attenuation would sound extremely - good (for a neural network output, although sound engineers typically aim for much lower attenuations). - Similarly, anything above +30 dB would just be completely missing the target, and there is no point - in measuring by exactly how much it missed it. -25, 25 is a more conservative range, but also more - in line with what neural nets currently can achieve. - - For instance, a Relative Volume Mel (RVM) score of -10 dB means that on average, the delta between - the target and reference mel-spec is 10 dB lower than the reference mel-spec value. - - The metric can be aggregated over a given frequency band in order have different insights for - different region of the spectrum. 
`num_aggregated_bands` controls the number of bands. - - ..Warning:: While this function is optimized for interpretability, nothing was done to ensure it - is numerically stable when computing its gradient. We thus advise against using it as a training loss. - - Args: - sample_rate (int): Sample rate of the input audio. - n_mels (int): Number of mel bands to use. - n_fft (int): Number of frequency bins for the STFT. - hop_length (int): Hop length of the STFT and the mel-spectrogram. - min_relative_volume (float): The error `z_ref - z_est` volume is given relative to - the volume of `z_ref`. If error is smaller than -25 dB of `z_ref`, then it is clamped. - max_relative_volume (float): Same as `min_relative_volume` but clamping if the error is larger than that. - max_initial_gain (float): When rescaling the audio at the very beginning, we will limit the gain - to that amount, to avoid rescaling near silence. Given in dB. - min_activity_volume (float): When computing the reference level from `z_ref`, will clamp low volume - bins to that amount. This is effectively our "zero" level for the reference mel-spectrogram, - and anything below that will be considered equally. - num_aggregated_bands (int): Number of bands to keep when computing the average RVM value. - For instance, a value of 3 would give 3 scores, roughly for low, mid and high freqs. - """ - def __init__(self, sample_rate: int = 24000, n_mels: int = 80, n_fft: int = 512, - hop_length: int = 128, min_relative_volume: float = -25, - max_relative_volume: float = 25, max_initial_gain: float = 25, - min_activity_volume: float = -25, - num_aggregated_bands: int = 4) -> None: - super().__init__() - self.melspec = torchaudio.transforms.MelSpectrogram( - n_mels=n_mels, n_fft=n_fft, hop_length=hop_length, - normalized=True, sample_rate=sample_rate, power=2) - self.min_relative_volume = min_relative_volume - self.max_relative_volume = max_relative_volume - self.max_initial_gain = max_initial_gain - self.min_activity_volume = min_activity_volume - self.num_aggregated_bands = num_aggregated_bands - - def forward(self, estimate: torch.Tensor, ground_truth: torch.Tensor) -> tp.Dict[str, torch.Tensor]: - """Compute RVM metric between estimate and reference samples. - - Args: - estimate (torch.Tensor): Estimate sample. - ground_truth (torch.Tensor): Reference sample. - - Returns: - dict[str, torch.Tensor]: Metrics with keys `rvm` for the overall average, and `rvm_{k}` - for the RVM over the k-th band (k=0..num_aggregated_bands - 1). 
- """ - min_scale = db_to_scale(-self.max_initial_gain) - std = ground_truth.pow(2).mean().sqrt().clamp(min=min_scale) - z_gt = self.melspec(ground_truth / std).sqrt() - z_est = self.melspec(estimate / std).sqrt() - - delta = z_gt - z_est - ref_db = scale_to_db(z_gt, self.min_activity_volume) - delta_db = scale_to_db(delta.abs(), min_volume=-120) - relative_db = (delta_db - ref_db).clamp(self.min_relative_volume, self.max_relative_volume) - dims = list(range(relative_db.dim())) - dims.remove(dims[-2]) - losses_per_band = relative_db.mean(dim=dims) - aggregated = [chunk.mean() for chunk in losses_per_band.chunk(self.num_aggregated_bands, dim=0)] - metrics = {f'rvm_{index}': value for index, value in enumerate(aggregated)} - metrics['rvm'] = losses_per_band.mean() - return metrics diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_GPT_3.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_GPT_3.py deleted file mode 100644 index 196ba766ee99b9fa3af24cead93cbecc9de63a1b..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_GPT_3.py +++ /dev/null @@ -1,417 +0,0 @@ -''' -改造自 my_GPT2 - -注意 RMSNorm 与 my_GPT 的不一样 - -将 Flash 改造为 my_GPT 那样使用卷积模组 - -与 my_GPT_2 进行比较快速随机序列拟合实验,可以发现本模型比 my_GPT_2 拟合速度快大约1.5倍,好! -但是训练速度要慢一些,大约是0.6x - -''' -import math -import random -import torch -from torch.utils.checkpoint import checkpoint -import torch.nn as nn -import torch.nn.functional as F -import model_utils_torch -from model_utils_torch import make_nlp_self_attn_mask,\ - make_sinusoidal_position_embedding, make_sinusoidal_position_channel_embedding,\ - apply_rotary_position_embedding,\ - T5_RelativePositionEmbedding,\ - weighted_and_neg_topk_cross_entropy -# from model_utils_torch.more_layers.flash_attention_2 import flash_quad_cross_attention, _TensorOptional -import nlg_utils -from typing import Optional - - -_TensorOptional = Optional[torch.Tensor] - - -class RmsNorm(torch.jit.ScriptModule): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.weight = nn.Parameter(torch.ones(dim)) - self.eps = eps - - @torch.jit.script_method - def forward(self, x: torch.Tensor): - y = x / x.square().mean(dim=-1, keepdim=True).add(self.eps).sqrt() * self.weight - return y - - -class MultiGroupConvLayer(torch.jit.ScriptModule): - def __init__(self, in_dim, out_dim, is_group=True): - super().__init__() - assert in_dim % 4 == 0 - assert out_dim % 4 == 0 - in_sub_dim = in_dim // 4 - if is_group: - out_sub_dim = out_dim // 4 - else: - out_sub_dim = out_dim - self.is_group = is_group - self.conv1 = nn.Conv1d(in_sub_dim, out_sub_dim, 2, bias=False) - self.conv2 = nn.Conv1d(in_sub_dim, out_sub_dim, 4, bias=False) - self.conv3 = nn.Conv1d(in_sub_dim, out_sub_dim, 8, bias=False) - self.conv4 = nn.Conv1d(in_sub_dim, out_sub_dim, 16, bias=False) - - @torch.jit.script_method - def forward(self, x: torch.Tensor): - # x shape [B, L, C] - x1, x2, x3, x4 = x.transpose(1, 2).chunk(4, 1) - # xs 4x [B, C//4, L] - - x1 = F.pad(x1, [1, 0], 'replicate') - x2 = F.pad(x2, [3, 0], 'replicate') - x3 = F.pad(x3, [7, 0], 'replicate') - x4 = F.pad(x4, [15, 0], 'replicate') - y1 = self.conv1(x1) / math.sqrt(2) - y2 = self.conv2(x2) / math.sqrt(4) - y3 = self.conv3(x3) / math.sqrt(8) - y4 = self.conv4(x4) / math.sqrt(16) - - if self.is_group: - y = torch.cat([y1,y2,y3,y4], 1) - else: - y = y1 + y2 + y3 + y4 - y = y.transpose(1, 2) - return y - - -@torch.jit.script -def laplacian_attn_fn(x): - """ https://arxiv.org/abs/2209.10655 claims this is more 
stable than Relu squared """ - mu = math.sqrt(0.5) - std = math.sqrt(0.25 * math.pi) - return (1 + torch.special.erf((x - mu) / (std * math.sqrt(2)))) * 0.5 - - -@torch.jit.script -def flash_quad_cross_attention(q, k, v, attn_mul: _TensorOptional=None, attn_bias: _TensorOptional=None, attn_act_fn: str='laplacian'): - ''' - - :param q: shape [B,...,qL,C] - :param k: shape [B,...,kL,C] - :param v: shape [B,...,qL,C] - :param attn_mul: shape [B,...,qL,kL] - :param attn_bias: shape [B,...,qL,kL] - :param attn_act_fn: - :return: - ''' - assert attn_act_fn in ('laplacian', 'relu2', 'softplus4', 'softmax'), 'Error! Invalid param attn_act_fn: ' + attn_act_fn - a = q @ k.transpose(-1, -2).contiguous() - # qk shape [B, Q, K] - - # scale - # 注意,不要再使用可变化的长度来正则化注意力矩阵,会导致前面的序列被后面的值影响!!! - a /= q.shape[-1] - - if attn_mul is not None: - a *= attn_mul - - if attn_bias is not None: - a += attn_bias - - if attn_act_fn == 'laplacian': - a = laplacian_attn_fn(a) - elif attn_act_fn == 'relu2': - a = F.relu(a) ** 2 - elif attn_act_fn == 'softplus4': - a = F.softplus(a, 4) - elif attn_act_fn == 'softmax': - a = F.softmax(a, -1) - - o = a @ v - return o - - -class FlashQuadSelfAttention(torch.jit.ScriptModule): - __constants__ = ['attn_act_fn'] - - def __init__(self, in_dim, out_dim, n_head, expand_head_dim, squeeze_head_dim, attn_act_fn='laplacian', use_rotary_pos_emb=False, linear_layer_nobias=False, is_group_u=True): - super().__init__() - - self.attn_act_fn = attn_act_fn - self.use_rotary_pos_emb = use_rotary_pos_emb - - self.n_head = n_head - self.expand_head_dim = expand_head_dim - self.squeeze_head_dim = squeeze_head_dim - - expand_dim = n_head * expand_head_dim - squeeze_dim = n_head * squeeze_head_dim - - self.u_m = MultiGroupConvLayer(in_dim, expand_dim, is_group=is_group_u) - - self.vz_dims = (expand_dim, squeeze_dim) - self.vz_m = nn.Linear(in_dim, sum(self.vz_dims), bias=not linear_layer_nobias) - - self.q_gamma = nn.Parameter(torch.rand(squeeze_dim) / math.sqrt(squeeze_dim)) - self.k_gamma = nn.Parameter(torch.rand(squeeze_dim) / math.sqrt(squeeze_dim)) - - self.q_bias = nn.Parameter(torch.zeros(squeeze_dim)) - self.k_bias = nn.Parameter(torch.zeros(squeeze_dim)) - - self.out = nn.Linear(expand_dim, out_dim, bias=not linear_layer_nobias) - - if use_rotary_pos_emb: - self.register_buffer('rotary_ch_emb', make_sinusoidal_position_channel_embedding(squeeze_head_dim, 10000)) - else: - self.register_buffer('rotary_ch_emb', None) - - @torch.jit.script_method - def forward(self, x, attn_mul: _TensorOptional=None, attn_bias: _TensorOptional=None, rotary_pos_start: int=0, rotary_pos_scale: int=1): - B, L, C = x.shape - y = x - - u = self.u_m(y) - - vz = self.vz_m(y) - v, z = torch.split_with_sizes(vz, self.vz_dims, -1) - - q = z * self.q_gamma + self.q_bias - k = z * self.k_gamma + self.k_bias - - q = q.reshape(B, L, self.n_head, self.squeeze_head_dim).transpose(1, 2) - k = k.reshape(B, L, self.n_head, self.squeeze_head_dim).transpose(1, 2) - v = v.reshape(B, L, self.n_head, self.expand_head_dim).transpose(1, 2) - - # u = k.reshape(B, L, self.n_head, self.expand_head_dim).transpose(1, 2) - - if self.use_rotary_pos_emb: - qL, kL = q.shape[-2], k.shape[-2] - pos_emb = make_sinusoidal_position_embedding(max(qL, kL), pos_ch=max(q.shape[-1], k.shape[-1]), pos_start=rotary_pos_start, pos_scale=rotary_pos_scale, device=q.device, ch_emb=self.rotary_ch_emb) - q = apply_rotary_position_embedding(q, pos_emb[:qL, :q.shape[-1]]) - k = apply_rotary_position_embedding(k, pos_emb[:kL, :k.shape[-1]]) - - y = u * 
flash_quad_cross_attention(q, k, v, attn_mul, attn_bias, self.attn_act_fn).transpose(1, 2).reshape(B, L, self.n_head * self.expand_head_dim) - y = self.out(y) - return y - - -class myT5_DecoderBlock(nn.Module): - def __init__(self, in_dim, n_head, expand_head_dim, squeeze_head_dim, attn_act_fn, use_rotary_pos_emb, is_group_u): - super().__init__() - self.attn_norm = RmsNorm(in_dim) - self.attn = FlashQuadSelfAttention(in_dim, in_dim, n_head, expand_head_dim, squeeze_head_dim, attn_act_fn, use_rotary_pos_emb, True, is_group_u=is_group_u) - - def forward(self, x, x_attn_bias, rotary_pos_start, rotary_pos_scale): - y = x + self.attn(self.attn_norm(x), attn_bias=x_attn_bias, rotary_pos_start=rotary_pos_start, rotary_pos_scale=rotary_pos_scale) - # if y.isnan().sum() > 0: - # print('1', y.shape) - return y - - -class GPT(nn.Module): - def __init__(self, token_dim=128, hidden_dim=512, n_head=8, expand_head_dim=48, squeeze_head_dim=24, attn_act_fn='laplacian', vocab_size=5000, n_decoder=4, - use_rotary_pos_emb=False, use_rel_pos_emb=True, use_random_pos=False, checkpoint_group_size=None, is_group_u=True): - super().__init__() - - self.token_emb = nn.Embedding(vocab_size, hidden_dim) - # 使用 F.normalize 效果更好 - # self.token_emb.weight.data = torch.randn_like(self.token_emb.weight.data) * 0.02 - self.token_emb.weight.data = F.normalize(self.token_emb.weight.data, 2, -1) - - # self.emb_up = nn.Linear(token_dim, hidden_dim) - # self.emb_down = nn.Linear(hidden_dim, token_dim) - - self.use_rotary_pos_emb = use_rotary_pos_emb - self.use_rel_pos_emb = use_rel_pos_emb - self.use_random_pos = use_random_pos - - self.float_min = -torch.inf - self.n_head = n_head - - self.decoders = nn.ModuleList([ - myT5_DecoderBlock(hidden_dim, n_head, expand_head_dim, squeeze_head_dim, attn_act_fn, use_rotary_pos_emb=use_rotary_pos_emb, is_group_u=is_group_u) - for _ in range(n_decoder) - ]) - self.decoder_norm = RmsNorm(hidden_dim) - - # self.lm_head = nn.Linear(hidden_dim, vocab_size, False) - - if use_rel_pos_emb: - self.dec_rel_pos_emb = T5_RelativePositionEmbedding(n_head, 64, 256, bidirectional=False) - - # 使用 checkpoint 节省显存 - if checkpoint_group_size is not None: - checkpoint_group = [] - for idx in range(int(math.ceil(len(self.decoders) / checkpoint_group_size))): - checkpoint_group.append(list(self.decoders[idx:idx+checkpoint_group_size])) - self.__dict__['checkpoint_group'] = checkpoint_group - - # 使用共享嵌入效果更好 - - def decode_func(self, x, x_mask, pos_scale, pos_bias): - # 解码器部分 - _, dec_L = x.shape - - # 生成解码器掩码 - dec_mask_self = make_nlp_self_attn_mask(x_mask, bidirectional=False)[:,None].repeat(1, self.n_head, 1, 1) - # B, H, L, L - - # 应用解码器相对位置偏置 - if self.use_rel_pos_emb: - dec_pos_emb_self = self.dec_rel_pos_emb(dec_L, dec_L, rel_scale=pos_scale, rel_bias=pos_bias).permute(2, 0, 1)[None] - else: - dec_pos_emb_self = 0 - - dec_mask_self = torch.where(dec_mask_self, dec_pos_emb_self, self.float_min) - - y = self.token_emb(x) - # y = self.emb_up(y) - - if y.requires_grad and y.device.type != 'cpu' and self.training and hasattr(self, 'checkpoint_group'): - for g in self.checkpoint_group: - def ckp(y, dec_mask_self, pos_bias, pos_scale): - for m in g: - y = m(y, dec_mask_self, rotary_pos_start=pos_bias, rotary_pos_scale=pos_scale) - return y - y = checkpoint(ckp, y, dec_mask_self, pos_bias, pos_scale, use_reentrant=False, preserve_rng_state=False) - - else: - for m_i, m in enumerate(self.decoders): - ty = m(y, dec_mask_self, rotary_pos_start=pos_bias, rotary_pos_scale=pos_scale) - # if ty.isnan().sum() >0: - # 
print(ty.shape) - # print(m(y, dec_mask_self, rotary_pos_start=pos_bias, rotary_pos_scale=pos_scale).isnan().sum()) - y = ty - - # y2 = self.emb_down(y2) - dec_out = self.decoder_norm(y) - return dec_out - - def decode_func_cycle(self, x, x_mask, stop_tokens, pos_scale, pos_bias, max_len=512, top_k=10, top_p=0.9, temperature=1.): - # 解码器自解码部分 - # 要求批量为 1 - assert x.shape[0] == x_mask.shape[0] == 1 - assert x_mask.dtype == torch.bool - - x = x[:, x_mask.reshape(-1)] - # shape [1, L] - - out_tokens = torch.zeros([0], dtype=torch.long, device=x.device) - - while True: - cat_x = torch.cat([x, out_tokens[None,]], 1) - cat_x_mask = torch.ones_like(cat_x, dtype=torch.bool) - - dec_out = self.decode_func(cat_x, cat_x_mask, pos_scale, pos_bias) - - out_logit = F.linear(dec_out[0, -1, :], self.token_emb.weight) - # 进行采样 - out_prob = nlg_utils.nlg_softmax_prob(out_logit, temperature) - out_prob = nlg_utils.nlg_prob_decay(out_prob, out_tokens, watch_len=10) - out_char = nlg_utils.nlg_sample(out_prob, top_k, top_p) - - out_tokens = torch.cat([out_tokens, out_char], 0) - - if out_char.item() in stop_tokens or out_tokens.shape[0] >= max_len: - break - - return out_tokens - - def gen(self, x, stop_tokens, max_len, pad_token, x_mask=None, top_k=10, top_p=0.9, temperature=1.): - # 时间步 生成 - if isinstance(stop_tokens, int): - stop_tokens = {stop_tokens} - - with torch.inference_mode(): - pos_scale = 1 - pos_bias = 0 - - if x_mask is None: - x_mask = torch.ones_like(x, dtype=torch.bool) - - out = [] - for x_1, x_mask_1 in zip(x, x_mask): - out_1 = self.decode_func_cycle(x_1[None], x_mask_1[None], stop_tokens=stop_tokens, - pos_scale=pos_scale, pos_bias=pos_bias, max_len=max_len, top_k=top_k, top_p=top_p, temperature=temperature) - out.append(out_1) - - out = torch.nested.as_nested_tensor(out) - out = torch.nested.to_padded_tensor(out, pad_token) - return out - - pred = gen - - def forward(self, x, label=None, label_mask=None, label_weight=None, label_smoothing=0., x_mask=None): - pos_scale = 1 - pos_bias = 0 - if self.training and self.use_random_pos: - # pos_scale = random.randint(1, 8) - pos_bias = random.randint(0, 8) - - if x_mask is None: - x_mask = torch.ones_like(x, dtype=torch.bool) - - dec_out = self.decode_func(x, x_mask, pos_scale=pos_scale, pos_bias=pos_bias) - out = F.linear(dec_out, self.token_emb.weight) - - loss = None - if label is not None: - - assert label.shape == x.shape - - topk = 10 - loss = weighted_and_neg_topk_cross_entropy(out.transpose(1, 2), label.long(), topk, label_weight, label_mask, label_smoothing) - - # with torch.no_grad(): - # print('acc', (out.argmax(2) == label).type(torch.float32).mean().item()) - - return out, loss - - -if __name__ == '__main__': - device = 'cuda:0' - - # 1.13 False - - net = GPT(token_dim=-1, hidden_dim=512, n_head=12, expand_head_dim=64, squeeze_head_dim=22, attn_act_fn='softmax', vocab_size=5000, n_decoder=12, - use_rotary_pos_emb=False, use_rel_pos_emb=True, use_random_pos=True, checkpoint_group_size=None, is_group_u=False).to(device) - model_utils_torch.print_params_size(net) - model_utils_torch.print_buffers_size(net) - - x = torch.as_tensor([[0,1,2,3,4,5,6,7], [1,2,3,4,5,6,7,8]], dtype=torch.long, device=device) - - with torch.no_grad(): - y_vec, _ = net(x) - - with torch.no_grad(): - # 时间步 循环生成 - y_vec = net.gen(x, 0, 12, 0) - - print(y_vec.shape) - - # --------- - from model_utils_torch import Adan - - optim = Adan(net.parameters(), 1e-4) - - # 7.8G 1.13.1 False - # train_xs = torch.randint(1, 100, [5000, 950]).cuda() - # 7.?G -> 5.9G 1.13.1 
True - # train_xs = torch.randint(1, 100, [5000, 900]).cuda() - - # 7.9G 2.0.0 False - # train_xs = torch.randint(1, 100, [5000, 950]).cuda() - # 7.9G 2.0.0 True - train_xs = torch.randint(1, 100, [5000, 100]).cuda() - - train_ys = torch.roll(train_xs, -1, 1) - - net.cuda() - - for it in range(200000): - ids = torch.randint(0, 2000, [16]).cuda() - - xs = train_xs[ids] - ys = train_ys[ids] - - y, loss = net(xs, ys) - optim.zero_grad() - loss.backward() - optim.step() - print(it, loss.item()) diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/void_cls.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/void_cls.py deleted file mode 100644 index dbb8593df8ed94faef83f62646d69f45eb31e769..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/void_cls.py +++ /dev/null @@ -1,8 +0,0 @@ -''' -This defines an empty class, generally used like an easydict. -It can be used as a filler for anything. -''' - - -class VC: - pass diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Cfa Mock Exam Pdf Level 2 WORK.md b/spaces/usbethFlerru/sovits-modelsV2/example/Cfa Mock Exam Pdf Level 2 WORK.md deleted file mode 100644 index c3f14987ab1984c52ddd187a2428305eb585fee8..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Cfa Mock Exam Pdf Level 2 WORK.md +++ /dev/null @@ -1,23 +0,0 @@ -

                      How to Prepare for the CFA Level 2 Exam with Mock Exams and Practice Questions

                      -

                      The CFA Level 2 exam is a challenging test that requires candidates to apply their knowledge of the curriculum to item-set questions. These questions consist of six multiple-choice questions based on a vignette that describes a scenario related to one or more topics in the curriculum. To answer these questions correctly, candidates need to demonstrate their ability to analyze, synthesize, and evaluate information from various sources.

                      -






                      -

                      One of the best ways to prepare for the CFA Level 2 exam is to use mock exams and practice questions that simulate the exam format and difficulty. Mock exams and practice questions can help candidates assess their strengths and weaknesses, identify gaps in their understanding, and improve their test-taking skills. In this article, we will discuss how to use mock exams and practice questions effectively for CFA Level 2 exam preparation.

                      -

                      What are mock exams and practice questions?

                      -

                      Mock exams and practice questions are study tools that are designed to replicate the timing and structure of the CFA Level 2 exam. They are based on the current curriculum and the topic area weights for the exam level. They are also in the same format as the questions on the actual exam, which means they are item-set questions that require candidates to read a vignette and answer six multiple-choice questions related to it.

                      -

                      Mock exams and practice questions are available through the Learning Ecosystem of the CFA Institute for registered candidates. Candidates can access them by logging into their account and selecting the appropriate exam level. The Learning Ecosystem also provides feedback on the answers, including detailed explanations for the correct answer and references to the relevant readings in the curriculum.

                      -

                      How to use mock exams and practice questions?

                      -

                      Mock exams and practice questions can be used at different stages of the CFA Level 2 exam preparation. Here are some tips on how to use them effectively:

                      -

                      -
                        -
                      • Use practice questions throughout your study process. After completing each study session in the curriculum, use the practice questions within each topic to test your understanding and retention of the material. Practice questions can help you evaluate your level of confidence and accuracy for each topic and identify areas that need more review or clarification. You can also use practice questions to reinforce your learning by reviewing them periodically.
                      • Use mock exams closer to the exam date. Mock exams are meant to simulate the exam-day experience as closely as possible. They can help you familiarize yourself with the exam format, timing, and difficulty. They can also help you assess your overall readiness for the exam and identify any weak areas that need more attention. Mock exams should be taken under exam conditions, which means you should follow the same rules and guidelines as on the actual exam day. You should also review your performance on mock exams carefully and learn from your mistakes.
                      • Use mock exams and practice questions as complements, not substitutes, for your study plan. Mock exams and practice questions are valuable study tools, but they are not enough to prepare you for the CFA Level 2 exam. You should not rely on them as your main source of learning or revision. You should still follow a comprehensive study plan that covers all the topics in the curriculum and uses various resources such as readings, videos, notes, flashcards, etc. Mock exams and practice questions should be used as supplements to your study plan, not replacements for it.
                      -

                      Where to find mock exams and practice questions?

                      -

                      The CFA Institute provides mock exams and practice questions for registered candidates through its Learning Ecosystem, as described above.

                      -

                      Besides the CFA Institute, there are also other sources of mock exams and practice questions that candidates can use for their CFA Level 2 exam preparation. Some of these sources include:

                      -
                        -
                      • CFA Program Sample Item-Set Questions. The CFA Institute provides sample item-set questions for each topic in the curriculum on its website. These sample questions are similar to those on mock exams and practice questions, but they are not updated regularly.

                        -
                        -
                        \ No newline at end of file diff --git a/spaces/user238921933/stable-diffusion-webui/modules/textual_inversion/ui.py b/spaces/user238921933/stable-diffusion-webui/modules/textual_inversion/ui.py deleted file mode 100644 index 5b75f799e745fa693cda06763af80069324a964f..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/textual_inversion/ui.py +++ /dev/null @@ -1,45 +0,0 @@ -import html - -import gradio as gr - -import modules.textual_inversion.textual_inversion -import modules.textual_inversion.preprocess -from modules import sd_hijack, shared - - -def create_embedding(name, initialization_text, nvpt, overwrite_old): - filename = modules.textual_inversion.textual_inversion.create_embedding(name, nvpt, overwrite_old, init_text=initialization_text) - - sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() - - return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", "" - - -def preprocess(*args): - modules.textual_inversion.preprocess.preprocess(*args) - - return f"Preprocessing {'interrupted' if shared.state.interrupted else 'finished'}.", "" - - -def train_embedding(*args): - - assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible' - - apply_optimizations = shared.opts.training_xattention_optimizations - try: - if not apply_optimizations: - sd_hijack.undo_optimizations() - - embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args) - - res = f""" -Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps. -Embedding saved to {html.escape(filename)} -""" - return res, "" - except Exception: - raise - finally: - if not apply_optimizations: - sd_hijack.apply_optimizations() - diff --git a/spaces/victorbahlangene/Star-wars-app/app.py b/spaces/victorbahlangene/Star-wars-app/app.py deleted file mode 100644 index 3522749c2388dc553feb902325cf501ddfc6bf67..0000000000000000000000000000000000000000 --- a/spaces/victorbahlangene/Star-wars-app/app.py +++ /dev/null @@ -1,212 +0,0 @@ -from pathlib import Path -from fastai.vision.all import * -from bs4 import BeautifulSoup -import requests -import re -import nltk -from pprint import pprint -import random -import streamlit as st -import pathlib - -plt =platform.system() - -if plt == "Windows": pathlib.WindowsPath = pathlib.PosixPath - - - -# Add random seed for answer location # -random.seed(42) - -# app layout # -st.set_page_config( - page_title="Star Wars Character App" -) - -#st.sidebar.success("Select a page above.") - -# set session_state for character name prediction # -if "char_label" not in st.session_state: - st.session_state["char_label"] = "" - -# session_state for quiz tests # -if "quiz_corpus" not in st.session_state: - st.session_state["quiz_corpus"] = "" - -# make prediction function # - - -def make_pred(_model, _image): - """ - we return the predicted label to be used later - """ - - label, idx, preds = _model.predict(_image) - print( - f"This is a picture of {label}, model is {preds[idx] * 100:.2f}% sure.") - st.write( - f"This is a picture of {label}, model is {preds[idx] * 100:.2f}% sure.") - - return label - - -# Load Model function # -@st.experimental_singleton -def load_model(): - path = Path() - learn_inf = load_learner(path / 'star-wars-characters-model_res34.pkl') - - return learn_inf - -# scrape web data for summary function # - - -@st.experimental_singleton -def 
srape_wiki(star_wars_character): - - url = "https://starwars.fandom.com/wiki/"+star_wars_character - print(url) - r = requests.get(url) - soup = BeautifulSoup(r.text, "html.parser") - - div_top = soup.find("div", class_="quote") - div_bot = soup.find("div", id="toc", class_="toc") - - content = "" - item = div_top.nextSibling - - while item != div_bot: - content += str(item) - item = item.nextSibling - - # beautify content # - content_b = BeautifulSoup(content, "html.parser") - - text_arr = [] - for sentence in content_b.find_all("p"): - text_arr.append(sentence.text.strip()) - - # make text one continous string # - text_str = " ".join(text_arr) - - # remove '\n' # - text_str = text_str.split("\n") - text_str = " ".join(text_str) - - # remove brackets and all inside them # - corpus = re.sub(r'\[.*?\]', "", text_str) - - return corpus - -# make quiz prediction # - - -@st.experimental_singleton -def model_predict(payload): - nltk.download('universal_tagset') - nltk.download('stopwords') - from Questgen import main - # load t5 model # - qg_mcq = main.QGen() - model_out = qg_mcq.predict_mcq(payload) - - for i in model_out["questions"]: - i["options"].insert(random.randint(0, 3), i["answer"]) - - return model_out - - -st.markdown("
                        Star Wars Character App
                        ", - unsafe_allow_html=True) - -# st.markdown("
                        Character Classification
                        ", -# unsafe_allow_html=True) - -# containers # -col1, col2, col3 = st.columns(3) - -# loading fastai model # -learn_inf = load_model() - -# CLASSIFICATION SECTION # -with st.expander("Image Classification"): - st.markdown("
                        Character Classification
                        ", - unsafe_allow_html=True) - - # upload image # - uploaded_file1 = st.file_uploader( - "Upload Star wars character", type=['png', 'jpeg', 'jpg']) - - if uploaded_file1 is not None: - image_file1 = PILImage.create((uploaded_file1)) - # with st.expander("See Image"): - st.image(image_file1.to_thumb(200, 200), - caption='Uploaded Image') - - with st.form(key="image_class"): - - classify_img = st.form_submit_button("Submit") - if classify_img: - # with st.expander("See explanation"): - # st.image(image_file1.to_thumb(200, 200), caption='Uploaded Image') - st.session_state["char_label"] = make_pred(learn_inf, image_file1) - -st.markdown("

                        ", unsafe_allow_html=True) - -# SUMMARY SECTION # -with st.expander("Summary"): - st.markdown("
                        Character Summary
                        ", - unsafe_allow_html=True) - st.write("Summary of: ", st.session_state["char_label"]) - try: - st.session_state["quiz_corpus"] = srape_wiki( - st.session_state["char_label"]) - st.write(st.session_state["quiz_corpus"]) - except AttributeError: - st.error( - "Please choose a different variation of the character name") - out_text_area = st.text_input( - "Charater name", st.session_state["char_label"]) - - with st.form(key="summary"): - #st.write("Summary of ", st.session_state["char_label"]) - summary = st.form_submit_button("Summary") - if summary: - st.write(out_text_area) - st.session_state["char_label"] = out_text_area - st.session_state["quiz_corpus"] = srape_wiki( - st.session_state["char_label"]) - st.write(st.session_state["quiz_corpus"]) - - -st.markdown("

                        ", unsafe_allow_html=True) - -# QUIZ SECTION # -with st.expander("Quiz"): - st.markdown("
                        Character Quiz
                        ", - unsafe_allow_html=True) - - payload = { - "input_text": st.session_state["quiz_corpus"] - } - - try: - model_output = model_predict(payload) - for i in model_output["questions"]: - with st.form(key=str(i["id"])): - st.write(f"Question {i['id']}") - entry = st.radio(label=i["question_statement"], - options=(i["options"]), key=str(i["id"])) - - checkbox_val = st.checkbox("Do you want a clue?") - - submitted = st.form_submit_button(label='Submit') - if submitted: - if i["answer"] == entry: - st.write("CORRECT!") - else: - st.write("Wrong, check clue") - if checkbox_val: - st.write(i["context"]) - except KeyError: - print("error caught") diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/apis/__init__.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/apis/__init__.py deleted file mode 100644 index 170724be38de42daf2bc1a1910e181d68818f165..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/apis/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .inference import inference_segmentor, init_segmentor, show_result_pyplot -from .test import multi_gpu_test, single_gpu_test -from .train import get_root_logger, set_random_seed, train_segmentor - -__all__ = [ - 'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor', - 'inference_segmentor', 'multi_gpu_test', 'single_gpu_test', - 'show_result_pyplot' -] diff --git a/spaces/wazhendeshiniya/White-box-Cartoonization/wbc/guided_filter.py b/spaces/wazhendeshiniya/White-box-Cartoonization/wbc/guided_filter.py deleted file mode 100644 index fd019d145efc7f308cd96de90f4e7b648f6820b4..0000000000000000000000000000000000000000 --- a/spaces/wazhendeshiniya/White-box-Cartoonization/wbc/guided_filter.py +++ /dev/null @@ -1,87 +0,0 @@ -import tensorflow as tf -import numpy as np - - - - -def tf_box_filter(x, r): - k_size = int(2*r+1) - ch = x.get_shape().as_list()[-1] - weight = 1/(k_size**2) - box_kernel = weight*np.ones((k_size, k_size, ch, 1)) - box_kernel = np.array(box_kernel).astype(np.float32) - output = tf.nn.depthwise_conv2d(x, box_kernel, [1, 1, 1, 1], 'SAME') - return output - - - -def guided_filter(x, y, r, eps=1e-2): - - x_shape = tf.shape(x) - #y_shape = tf.shape(y) - - N = tf_box_filter(tf.ones((1, x_shape[1], x_shape[2], 1), dtype=x.dtype), r) - - mean_x = tf_box_filter(x, r) / N - mean_y = tf_box_filter(y, r) / N - cov_xy = tf_box_filter(x * y, r) / N - mean_x * mean_y - var_x = tf_box_filter(x * x, r) / N - mean_x * mean_x - - A = cov_xy / (var_x + eps) - b = mean_y - A * mean_x - - mean_A = tf_box_filter(A, r) / N - mean_b = tf_box_filter(b, r) / N - - output = mean_A * x + mean_b - - return output - - - -def fast_guided_filter(lr_x, lr_y, hr_x, r=1, eps=1e-8): - - #assert lr_x.shape.ndims == 4 and lr_y.shape.ndims == 4 and hr_x.shape.ndims == 4 - - lr_x_shape = tf.shape(lr_x) - #lr_y_shape = tf.shape(lr_y) - hr_x_shape = tf.shape(hr_x) - - N = tf_box_filter(tf.ones((1, lr_x_shape[1], lr_x_shape[2], 1), dtype=lr_x.dtype), r) - - mean_x = tf_box_filter(lr_x, r) / N - mean_y = tf_box_filter(lr_y, r) / N - cov_xy = tf_box_filter(lr_x * lr_y, r) / N - mean_x * mean_y - var_x = tf_box_filter(lr_x * lr_x, r) / N - mean_x * mean_x - - A = cov_xy / (var_x + eps) - b = mean_y - A * mean_x - - mean_A = tf.image.resize_images(A, hr_x_shape[1: 3]) - mean_b = tf.image.resize_images(b, hr_x_shape[1: 3]) - - output = mean_A * hr_x + mean_b - - return output - - -if __name__ == '__main__': - import cv2 - from tqdm import tqdm - 
- input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - #input_superpixel = tf.placeholder(tf.float32, [16, 256, 256, 3]) - output = guided_filter(input_photo, input_photo, 5, eps=1) - image = cv2.imread('output_figure1/cartoon2.jpg') - image = image/127.5 - 1 - image = np.expand_dims(image, axis=0) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - sess.run(tf.global_variables_initializer()) - - out = sess.run(output, feed_dict={input_photo: image}) - out = (np.squeeze(out)+1)*127.5 - out = np.clip(out, 0, 255).astype(np.uint8) - cv2.imwrite('output_figure1/cartoon2_filter.jpg', out) diff --git a/spaces/whitphx/gradio-static-test/dist/assets/index-fa13b0d6.js b/spaces/whitphx/gradio-static-test/dist/assets/index-fa13b0d6.js deleted file mode 100644 index 6011fc6cd1511434039f876e66abf1861058d5dc..0000000000000000000000000000000000000000 --- a/spaces/whitphx/gradio-static-test/dist/assets/index-fa13b0d6.js +++ /dev/null @@ -1,2 +0,0 @@ -import{E as W,C as Y,L as d}from"./index-46909c92.js";import{s as n,t as r,L as R,i as Z,d as a,f as X,a as y,b as f}from"./index-1040e6d9.js";import"../lite.js";import"./Blocks-99723874.js";import"./Button-0391b19a.js";import"./BlockLabel-a3ec523d.js";import"./Empty-91947ea3.js";/* empty css */import"./Copy-d654b047.js";import"./Download-35908774.js";const l=1,w=189,S=190,b=191,T=192,U=193,m=194,V=22,g=23,h=47,G=48,c=53,u=54,_=55,j=57,E=58,k=59,z=60,v=61,H=63,N=230,A=71,F=255,K=121,C=142,D=143,M=146,i=10,s=13,t=32,o=9,q=35,L=40,B=46,J=new Set([g,h,G,F,H,K,u,_,N,z,v,E,k,A,C,D,M]),OO=new W((O,$)=>{if(O.next<0)O.acceptToken(m);else if(!(O.next!=i&&O.next!=s))if($.context.depth<0)O.acceptToken(T,1);else{O.advance();let Q=0;for(;O.next==t||O.next==o;)O.advance(),Q++;let P=O.next==i||O.next==s||O.next==q;O.acceptToken(P?U:b,-Q)}},{contextual:!0,fallback:!0}),$O=new W((O,$)=>{let Q=$.context.depth;if(Q<0)return;let P=O.peek(-1);if((P==i||P==s)&&$.context.depth>=0){let e=0,x=0;for(;;){if(O.next==t)e++;else if(O.next==o)e+=8-e%8;else break;O.advance(),x++}e!=Q&&O.next!=i&&O.next!=s&&O.next!=q&&(e{for(let $=0;$<5;$++){if(O.next!="print".charCodeAt($))return;O.advance()}if(!/\w/.test(String.fromCharCode(O.next)))for(let $=0;;$++){let Q=O.peek($);if(!(Q==t||Q==o)){Q!=L&&Q!=B&&Q!=i&&Q!=s&&Q!=q&&O.acceptToken(l);return}}}),iO=n({'async "*" "**" FormatConversion FormatSpec':r.modifier,"for while if elif else try except finally return raise break continue with pass assert await yield match case":r.controlKeyword,"in not and or is del":r.operatorKeyword,"from def class global nonlocal lambda":r.definitionKeyword,import:r.moduleKeyword,"with as print":r.keyword,Boolean:r.bool,None:r.null,VariableName:r.variableName,"CallExpression/VariableName":r.function(r.variableName),"FunctionDefinition/VariableName":r.function(r.definition(r.variableName)),"ClassDefinition/VariableName":r.definition(r.className),PropertyName:r.propertyName,"CallExpression/MemberExpression/PropertyName":r.function(r.propertyName),Comment:r.lineComment,Number:r.number,String:r.string,FormatString:r.special(r.string),UpdateOp:r.updateOperator,ArithOp:r.arithmeticOperator,BitOp:r.bitwiseOperator,CompareOp:r.compareOperator,AssignOp:r.definitionOperator,Ellipsis:r.punctuation,At:r.meta,"( )":r.paren,"[ ]":r.squareBracket,"{ }":r.brace,".":r.derefOperator,", 
;":r.separator}),sO={__proto__:null,await:40,or:50,and:52,in:56,not:58,is:60,if:66,else:68,lambda:72,yield:90,from:92,async:98,for:100,None:152,True:154,False:154,del:168,pass:172,break:176,continue:180,return:184,raise:192,import:196,as:198,global:202,nonlocal:204,assert:208,elif:218,while:222,try:228,except:230,finally:232,with:236,def:240,class:250,match:261,case:267},oO=d.deserialize({version:14,states:"!L`O`Q$IXOOO%fQ$I[O'#G|OOQ$IS'#Cm'#CmOOQ$IS'#Cn'#CnO'UQ$IWO'#ClO(wQ$I[O'#G{OOQ$IS'#G|'#G|OOQ$IS'#DS'#DSOOQ$IS'#G{'#G{O)eQ$IWO'#CsO)uQ$IWO'#DdO*VQ$IWO'#DhOOQ$IS'#Ds'#DsO*jO`O'#DsO*rOpO'#DsO*zO!bO'#DtO+VO#tO'#DtO+bO&jO'#DtO+mO,UO'#DtO-oQ$I[O'#GmOOQ$IS'#Gm'#GmO'UQ$IWO'#GlO/RQ$I[O'#GlOOQ$IS'#E]'#E]O/jQ$IWO'#E^OOQ$IS'#Gk'#GkO/tQ$IWO'#GjOOQ$IV'#Gj'#GjO0PQ$IWO'#FPOOQ$IS'#GX'#GXO0UQ$IWO'#FOOOQ$IV'#Hx'#HxOOQ$IV'#Gi'#GiOOQ$IT'#Fh'#FhQ`Q$IXOOO'UQ$IWO'#CoO0dQ$IWO'#C{O0kQ$IWO'#DPO0yQ$IWO'#HQO1ZQ$I[O'#EQO'UQ$IWO'#EROOQ$IS'#ET'#ETOOQ$IS'#EV'#EVOOQ$IS'#EX'#EXO1oQ$IWO'#EZO2VQ$IWO'#E_O0PQ$IWO'#EaO2jQ$I[O'#EaO0PQ$IWO'#EdO/jQ$IWO'#EgO/jQ$IWO'#EkO/jQ$IWO'#EnO2uQ$IWO'#EpO2|Q$IWO'#EuO3XQ$IWO'#EqO/jQ$IWO'#EuO0PQ$IWO'#EwO0PQ$IWO'#E|O3^Q$IWO'#FROOQ$IS'#Cc'#CcOOQ$IS'#Cd'#CdOOQ$IS'#Ce'#CeOOQ$IS'#Cf'#CfOOQ$IS'#Cg'#CgOOQ$IS'#Ch'#ChOOQ$IS'#Cj'#CjO'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O3eQ$IWO'#DmOOQ$IS,5:W,5:WO3xQ$IWO'#H[OOQ$IS,5:Z,5:ZO4VQ%1`O,5:ZO4[Q$I[O,59WO0dQ$IWO,59`O0dQ$IWO,59`O0dQ$IWO,59`O6zQ$IWO,59`O7PQ$IWO,59`O7WQ$IWO,59hO7_Q$IWO'#G{O8eQ$IWO'#GzOOQ$IS'#Gz'#GzOOQ$IS'#DY'#DYO8|Q$IWO,59_O'UQ$IWO,59_O9[Q$IWO,59_O9aQ$IWO,5:PO'UQ$IWO,5:POOQ$IS,5:O,5:OO9oQ$IWO,5:OO9tQ$IWO,5:VO'UQ$IWO,5:VO'UQ$IWO,5:TOOQ$IS,5:S,5:SO:VQ$IWO,5:SO:[Q$IWO,5:UOOOO'#Fp'#FpO:aO`O,5:_OOQ$IS,5:_,5:_OOOO'#Fq'#FqO:iOpO,5:_O:qQ$IWO'#DuOOOO'#Fr'#FrO;RO!bO,5:`OOQ$IS,5:`,5:`OOOO'#Fu'#FuO;^O#tO,5:`OOOO'#Fv'#FvO;iO&jO,5:`OOOO'#Fw'#FwO;tO,UO,5:`OOQ$IS'#Fx'#FxOqQ$I[O,5=WO?[Q%GlO,5=WO?{Q$I[O,5=WOOQ$IS,5:x,5:xO@dQ$IXO'#GQOAsQ$IWO,5;TOOQ$IV,5=U,5=UOBOQ$I[O'#HtOBgQ$IWO,5;kOOQ$IS-E:V-E:VOOQ$IV,5;j,5;jO3SQ$IWO'#EwOOQ$IT-E9f-E9fOBoQ$I[O,59ZODvQ$I[O,59gOEaQ$IWO'#G}OElQ$IWO'#G}O0PQ$IWO'#G}OEwQ$IWO'#DROFPQ$IWO,59kOFUQ$IWO'#HRO'UQ$IWO'#HRO/jQ$IWO,5=lOOQ$IS,5=l,5=lO/jQ$IWO'#D|OOQ$IS'#D}'#D}OFsQ$IWO'#FzOGTQ$IWO,58zOGTQ$IWO,58zO)hQ$IWO,5:jOGcQ$I[O'#HTOOQ$IS,5:m,5:mOOQ$IS,5:u,5:uOGvQ$IWO,5:yOHXQ$IWO,5:{OOQ$IS'#F}'#F}OHgQ$I[O,5:{OHuQ$IWO,5:{OHzQ$IWO'#HwOOQ$IS,5;O,5;OOIYQ$IWO'#HsOOQ$IS,5;R,5;RO3XQ$IWO,5;VO3XQ$IWO,5;YOIkQ$I[O'#HyO'UQ$IWO'#HyOIuQ$IWO,5;[O2uQ$IWO,5;[O/jQ$IWO,5;aO0PQ$IWO,5;cOIzQ$IXO'#ElOKTQ$IZO,5;]ONiQ$IWO'#HzO3XQ$IWO,5;aONtQ$IWO,5;cONyQ$IWO,5;hO! 
RQ$I[O,5;mO'UQ$IWO,5;mO!#uQ$I[O1G.hO!#|Q$I[O1G.hO!&mQ$I[O1G.hO!&wQ$I[O1G.hO!)bQ$I[O1G.hO!)uQ$I[O1G.hO!*YQ$IWO'#HZO!*hQ$I[O'#GmO/jQ$IWO'#HZO!*rQ$IWO'#HYOOQ$IS,5:X,5:XO!*zQ$IWO,5:XO!+PQ$IWO'#H]O!+[Q$IWO'#H]O!+oQ$IWO,5=vOOQ$IS'#Dq'#DqOOQ$IS1G/u1G/uOOQ$IS1G.z1G.zO!,oQ$I[O1G.zO!,vQ$I[O1G.zO0dQ$IWO1G.zO!-cQ$IWO1G/SOOQ$IS'#DX'#DXO/jQ$IWO,59rOOQ$IS1G.y1G.yO!-jQ$IWO1G/cO!-zQ$IWO1G/cO!.SQ$IWO1G/dO'UQ$IWO'#HSO!.XQ$IWO'#HSO!.^Q$I[O1G.yO!.nQ$IWO,59gO!/tQ$IWO,5=rO!0UQ$IWO,5=rO!0^Q$IWO1G/kO!0cQ$I[O1G/kOOQ$IS1G/j1G/jO!0sQ$IWO,5=mO!1jQ$IWO,5=mO/jQ$IWO1G/oO!2XQ$IWO1G/qO!2^Q$I[O1G/qO!2nQ$I[O1G/oOOQ$IS1G/n1G/nOOQ$IS1G/p1G/pOOOO-E9n-E9nOOQ$IS1G/y1G/yOOOO-E9o-E9oO!3OQ$IWO'#HhO/jQ$IWO'#HhO!3^Q$IWO,5:aOOOO-E9p-E9pOOQ$IS1G/z1G/zOOOO-E9s-E9sOOOO-E9t-E9tOOOO-E9u-E9uOOQ$IS-E9v-E9vO!3iQ%GlO1G2rO!4YQ$I[O1G2rO'UQ$IWO,5`OOQ$IS1G1V1G1VO!5YQ$IWO1G1VOOQ$IS'#DT'#DTO/jQ$IWO,5=iOOQ$IS,5=i,5=iO!5_Q$IWO'#FiO!5jQ$IWO,59mO!5rQ$IWO1G/VO!5|Q$I[O,5=mOOQ$IS1G3W1G3WOOQ$IS,5:h,5:hO!6mQ$IWO'#GlOOQ$IS,5cO!8oQ$IWO,5>cO!8}Q$IWO,5>_O!9eQ$IWO,5>_O!9vQ$IZO1G0qO!=XQ$IZO1G0tO!@gQ$IWO,5>eO!@qQ$IWO,5>eO!@yQ$I[O,5>eO/jQ$IWO1G0vO!ATQ$IWO1G0vO3XQ$IWO1G0{ONtQ$IWO1G0}OOQ$IV,5;W,5;WO!AYQ$IYO,5;WO!A_Q$IZO1G0wO!DsQ$IWO'#GUO3XQ$IWO1G0wO3XQ$IWO1G0wO!EQQ$IWO,5>fO!E_Q$IWO,5>fO0PQ$IWO,5>fOOQ$IV1G0{1G0{O!EgQ$IWO'#EyO!ExQ%1`O1G0}OOQ$IV1G1S1G1SO3XQ$IWO1G1SO!FQQ$IWO'#FTOOQ$IV1G1X1G1XO! RQ$I[O1G1XOOQ$IS,5=u,5=uOOQ$IS'#Dn'#DnO/jQ$IWO,5=uO!FVQ$IWO,5=tO!FjQ$IWO,5=tOOQ$IS1G/s1G/sO!FrQ$IWO,5=wO!GSQ$IWO,5=wO!G[Q$IWO,5=wO!GoQ$IWO,5=wO!HPQ$IWO,5=wOOQ$IS1G3b1G3bOOQ$IS7+$f7+$fO!5rQ$IWO7+$nO!IrQ$IWO1G.zO!IyQ$IWO1G.zOOQ$IS1G/^1G/^OOQ$IS,5SO!NaQ$IWO,5>SO!NaQ$IWO,5>SO!NoO!LQO'#DwO!NzOSO'#HiOOOO1G/{1G/{O# PQ$IWO1G/{O# XQ%GlO7+(^O# xQ$I[O1G2PP#!cQ$IWO'#FyOOQ$IS,5T,5>TOOOO7+%g7+%gO#8UQ$IWO1G2rO#8oQ$IWO1G2rP'UQ$IWO'#FlO/jQ$IWO<bO#9cQ$IWO,5>bO0PQ$IWO,5>bO#9tQ$IWO,5>aOOQ$IS<hO#CeQ$IWO,5>hOOQ$IS,5>h,5>hO#CpQ$IWO,5>gO#DRQ$IWO,5>gOOQ$IS1G1P1G1POOQ$IS,5;g,5;gO#DZQ$IWO1G1ZP#D`Q$IWO'#FnO#DpQ$IWO1G1uO#ETQ$IWO1G1uO#EeQ$IWO1G1uP#EpQ$IWO'#FoO#E}Q$IWO7+(}O#F_Q$IWO7+(}O#F_Q$IWO7+(}O#FgQ$IWO7+(}O#FwQ$IWO7+(tO7WQ$IWO7+(tOOQ$ISAN>TAN>TO#GbQ$IWO<aAN>aO/jQ$IWO1G1sO#GrQ$I[O1G1sP#G|Q$IWO'#FmOOQ$IS1G1y1G1yP#HZQ$IWO'#FsO#HhQ$IWO7+)YOOOO-E9r-E9rO#IOQ$IWO7+(^OOQ$ISAN?VAN?VO#IiQ$IWO,5jO$,bQ$IWO,5>jO0PQ$IWO,5;vO$,sQ$IWO,5;zO$,xQ$IWO,5;zO#NzQ$IWO'#IQO$,}Q$IWO'#IQO$-SQ$IWO,5;{OOQ$IS,5;|,5;|O'UQ$IWO'#FgOOQ$IU1G1[1G1[O3XQ$IWO1G1[OOQ$ISAN@gAN@gO$-XQ$IWOG27oO$-iQ$IWO,59{OOQ$IS1G3[1G3[OOQ$IS,5lO#NzQ$IWO,5>lOOQ$IS1G1g1G1gO$0YQ$I[O,5mO$0hQ$IWO,5>mOOQ$IS1G1j1G1jOOQ$IS7+&y7+&yP#NzQ$IWO'#G_O$0pQ$IWO1G4WO$0zQ$IWO1G4WO$1SQ$IWO1G4WOOQ$IS7+%R7+%RO$1bQ$IWO1G1kO$1pQ$I[O'#FWO$1wQ$IWO,5m'PP>pP>vByFcPFw'PPPPF{GR&wP&w&wP&wP&wP&wP&wP&w&w&wP&wPP&wPP&wPGXPG`GfPG`PG`G`PPPG`PIePInItIzIePG`JQPG`PJXJ_PJcJwKfLPJcJcLVLdJcJcJcJcLxMOMRMWMZMaMgMsNVN]NgNm! Z! a! g! m! w! 
}!!T!!Z!!a!!g!!y!#T!#Z!#a!#g!#q!#w!#}!$T!$Z!$e!$k!$u!${!%U!%[!%k!%s!%}!&UPPPPPPPPP!&[!&d!&m!&w!'SPPPPPPPPPPPP!+r!,[!0j!3vPP!4O!4^!4g!5]!5S!5f!5l!5o!5r!5u!5}!6nPPPPPPPPPP!6q!6tPPPPPPPPP!6z!7W!7d!7j!7s!7v!7|!8S!8Y!8]P!8e!8n!9j!9m]iOr#n$n)c+c'udOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/x}!dP#j#w$Y$h$t%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!P!eP#j#w$Y$h$t$v%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!R!fP#j#w$Y$h$t$v$w%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!T!gP#j#w$Y$h$t$v$w$x%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!V!hP#j#w$Y$h$t$v$w$x$y%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!X!iP#j#w$Y$h$t$v$w$x$y$z%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!]!iP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m'uSOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/x&ZUOXYZhrtv|}!R!S!T!X!j!l!p!q!r!t!u#^#k#n$Q$S$U$X$l$n%O%T%[%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.b.y/i/j/k/l/n/o/p/q/t/x%eWOXYZhrv|}!R!S!T!X!j!l#^#k#n$Q$S$U$X$l$n%O%T%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,S,U,W,Y,Z,],o,q,x,{-n-o.b/o/p/qQ#}uQ.c-sR/u/w'ldOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xW#ql!O!P$`W#yu&b-s/wQ$b!QQ$r!YQ$s!ZW$}!j'h*O+vS&a#z#{Q'R$mQ(l&ZQ(z&qU({&s(|(}U)O&u)P+RQ)n'[W)o'^+q,s-]S+p)p)qY,_*|,`-T-U-wQ,b+OQ,l+gQ,n+il-`,w-f-g-i.R.T.Y.p.u.z/P/[/a/dQ-v-SQ.Z-hQ.g-{Q.r.VU/V.{/Y/bX/]/Q/^/e/fR&`#yi!xXY!S!T%a%h'y(R)V*]*`*bR%_!wQ!|XQ%z#^Q&i$UR&l$XT-r-O.y![!kP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/mQ&^#rR'a$sR'g$}Q%W!nR.e-y'tcOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xS#hc#i!P-d,w-f-g-h-i-{.R.T.Y.p.u.z.{/P/Q/Y/[/^/a/b/d/e/f'tcOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xT#hc#iS#__#`S#b`#cS#da#eS#fb#gT*t(e*uT(f%z(hQ$WwR+o)oX$Uw$V$W&kZkOr$n)c+cXoOr)c+cQ$o!WQ&y$fQ&z$gQ']$qQ'`$sQ)a'QQ)g'VQ)i'WQ)j'XQ)w'_Q)y'aQ+V)VQ+X)WQ+Y)XQ+^)_S+`)b)xQ+d)eQ+e)fQ+f)hQ,d+UQ,e+WQ,g+_Q,h+aQ,m+hQ-W,fQ-Y,kQ-Z,lQ-x-XQ._-lR.x.`WoOr)c+cR#tnQ'_$rR)b'RQ+n)oR,q+oQ)x'_R+a)bZmOnr)c+cQ'c$tR){'dT,u+u,vu-k,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dt-k,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dQ.Z-hX/]/Q/^/e/f!P-c,w-f-g-h-i-{.R.T.Y.p.u.z.{/P/Q/Y/[/^/a/b/d/e/fQ.O-bR.l.Pg.R-e.S.h.o.t/S/U/W/c/g/hu-j,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dX-|-`-j.g/VR.i-{V/X.{/Y/bR.`-lQrOR#vrQ&c#|R(q&cS%n#R$OS(Y%n(]T(]%q&eQ%b!
zQ%i!}W'z%b%i(P(TQ(P%fR(T%kQ&n$YR(w&nQ(`%rQ*g(ZT*m(`*gQ'i%PR*P'iS'l%S%TY*T'l*U+|,|-pU*U'm'n'oU+|*V*W*XS,|+},OR-p,}Q#Y]R%u#YQ#]^R%w#]Q#`_R%{#`Q(c%xS*r(c*sR*s(dQ*u(eR,[*uQ#c`R%}#cQ#eaR&O#eQ#gbR&P#gQ#icR&Q#iQ#lfQ&S#jW&V#l&S(t*yQ(t&hR*y/mQ$VwS&j$V&kR&k$WQ&x$dR)T&xQ&[#qR(m&[Q$`!PR&r$`Q*}({S,a*}-VR-V,bQ&v$bR)Q&vQ#ojR&X#oQ+c)cR,i+cQ)U&yR+T)UQ&|$hS)]&|)^R)^&}Q'U$oR)d'UQ'Z$pS)m'Z+lR+l)nQ+r)sR,t+rWnOr)c+cR#snQ,v+uR-^,vd.S-e.h.o.t/S/U/W/c/g/hR.n.SU-z-`.g/VR.f-zQ/R.tS/_/R/`R/`/SS.|.h.iR/Z.|Q.U-eR.q.USqOrT+b)c+cWpOr)c+cR'S$nYjOr$n)c+cR&W#n[wOr#n$n)c+cR&i$U&YPOXYZhrtv|}!R!S!T!X!j!l!p!q!r!t!u#^#k#n$Q$S$U$X$l$n%O%T%[%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.b.y/i/j/k/l/n/o/p/q/t/xQ!oSQ#jeQ#wsU$Yx%d'}S$h!U$kQ$t![Q$v!dQ$w!eQ$x!fQ$y!gQ$z!hQ${!iQ%f!{Q%k#OQ%q#SQ%r#TQ&e$PQ&}$iQ'd$uQ(j&TU(u&m(v*zW)Y&{)[+[+]Q*Z'wQ*d(WQ+Z)ZQ,V*lQ.w.^R/m/rQ!zXQ!}YQ$f!SQ$g!T^'v%a%h'y(R*]*`*bR+W)V[fOr#n$n)c+ch!wXY!S!T%a%h'y(R)V*]*`*bQ#RZQ#mhS$Ov|Q$]}W$d!R$X'O)`S$p!X$lW$|!j'h*O+vQ%S!lQ%x#^`&U#k&R(i(k(s*x,]/qQ&f$QQ&g$SQ&h$UQ'e%OQ'o%TQ'u%_W(V%m(X*e*iQ(Z%oQ(d%yQ(o&`S(r&d/oQ(x&oQ(y&pU)R&w)S+SQ)h'VY)k'Y)l+j+k,oQ)|'g^*Q'k*S+z+{,{-o.bQ*W'mQ*X'nS*Y'r/pW*k(_*f,S,WW*o(b*q,Y,ZQ+t)vQ+y*RQ+}*VQ,X*pQ,^*{Q,p+nQ,y+wQ,z+xQ,},OQ-R,UQ-[,qQ-m,xR.a-nhTOr#k#n$n&R&d'r(i(k)c+c$z!vXYZhv|}!R!S!T!X!j!l#^$Q$S$U$X$l%O%T%_%a%h%m%o%y&`&o&p&w'O'V'Y'g'h'k'm'n'y(R(X(_(b(s)S)V)`)l)v*O*R*S*V*]*`*b*e*f*i*p*q*x*{+S+j+k+n+v+w+x+z+{,O,S,U,W,Y,Z,],o,q,x,{-n-o.b/o/p/qQ#xtW%X!p!t/j/tQ%Y!qQ%Z!rQ%]!uQ%g/iS'q%[/nQ's/kQ't/lQ,P*^Q-Q,QS-q-O.yR/v/xU#|u-s/wR(p&b[gOr#n$n)c+cX!yX#^$U$XQ#WZQ$RvR$[|Q%c!zQ%j!}Q%p#RQ'e$|Q(Q%fQ(U%kQ(^%qQ(a%rQ*h(ZQ-P,PQ-u-QR.d-tQ$ZxQ'|%dR*_'}Q-t-OR/T.yR#QYR#VZR%R!jQ%P!jV)}'h*O+v!]!mP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/mR%U!lR%z#^Q(g%zR*w(hQ$e!RQ&l$XQ)_'OR+_)`Q#rlQ$^!OQ$a!PR&t$`Q(z&sR+Q(}Q(z&sQ+P(|R+Q(}R$c!QXpOr)c+cQ$j!UR'P$kQ$q!XR'Q$lR)u'^Q)s'^V,r+q,s-]Q-l,wQ.W-fR.X-gU-e,w-f-gQ.]-iQ.h-{Q.m.RU.o.T.p/PQ.t.YQ/S.uQ/U.zU/W.{/Y/bQ/c/[Q/g/aR/h/dR.[-hR.j-{",nodeNames:"⚠ print Comment Script AssignStatement * BinaryExpression BitOp BitOp BitOp BitOp ArithOp ArithOp @ ArithOp ** UnaryExpression ArithOp BitOp AwaitExpression await ) ( ParenthesizedExpression BinaryExpression or and CompareOp in not is UnaryExpression ConditionalExpression if else LambdaExpression lambda ParamList VariableName AssignOp , : NamedExpression AssignOp YieldExpression yield from TupleExpression ComprehensionExpression async for LambdaExpression ] [ ArrayExpression ArrayComprehensionExpression } { DictionaryExpression DictionaryComprehensionExpression SetExpression SetComprehensionExpression CallExpression ArgList AssignOp MemberExpression . 
PropertyName Number String FormatString FormatReplacement FormatConversion FormatSpec ContinuedString Ellipsis None Boolean TypeDef AssignOp UpdateStatement UpdateOp ExpressionStatement DeleteStatement del PassStatement pass BreakStatement break ContinueStatement continue ReturnStatement return YieldStatement PrintStatement RaiseStatement raise ImportStatement import as ScopeStatement global nonlocal AssertStatement assert StatementGroup ; IfStatement Body elif WhileStatement while ForStatement TryStatement try except finally WithStatement with FunctionDefinition def ParamList AssignOp TypeDef ClassDefinition class DecoratedStatement Decorator At MatchStatement match MatchBody MatchClause case CapturePattern LiteralPattern ArithOp ArithOp AsPattern OrPattern LogicOp AttributePattern SequencePattern MappingPattern StarPattern ClassPattern PatternArgList KeywordPattern KeywordPattern Guard",maxTerm:267,context:PO,nodeProps:[["group",-14,4,80,82,83,85,87,89,91,93,94,95,97,100,103,"Statement Statement",-22,6,16,19,23,38,47,48,54,55,58,59,60,61,62,65,68,69,70,74,75,76,77,"Expression",-10,105,107,110,112,113,117,119,124,126,129,"Statement",-9,134,135,138,139,141,142,143,144,145,"Pattern"],["openedBy",21,"(",52,"[",56,"{"],["closedBy",22,")",53,"]",57,"}"]],propSources:[iO],skippedNodes:[0,2],repeatNodeCount:38,tokenData:"&JdMgR!^OX$}XY!&]Y[$}[]!&]]p$}pq!&]qr!(grs!,^st!IYtu$}uv$5[vw$7nwx$8zxy%'vyz%(|z{%*S{|%,r|}%.O}!O%/U!O!P%1k!P!Q%UZ&^7[&WW&f#tOr(}rs)}sw(}wx>wx#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(}:Y?QX&^7[&WW&f#tOr>wrs?ms#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:Y?rX&^7[Or>wrs@_s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:Y@dX&^7[Or>wrs-}s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:YAUT&^7[O#o>w#o#p6g#p#q>w#q#r6g#r~>w`x#O!`x#O!gZ&WW&R,XOY!wZ]!Ad]^>w^r!Adrs!Bhs#O!Ad#O#P!C[#P#o!Ad#o#p!9f#p#q!Ad#q#r!7x#r~!AdEc!BoX&^7[&R,XOr>wrs@_s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>wEc!CaT&^7[O#o!Ad#o#p!7x#p#q!Ad#q#r!7x#r~!AdGZ!CuT&^7[O#o!-l#o#p!DU#p#q!-l#q#r!DU#r~!-l0}!De]&TS&WW&R,X&Z`&d!b&f#tOY!DUYZAyZ]!DU]^Ay^r!DUrs!E^sw!DUwx!5tx#O!DU#O#P!FU#P#o!DU#o#p!F[#p~!DU0}!EiX&TS&R,X&Z`&d!bOrAyrsCiswAywx5Px#OAy#O#PEo#P#oAy#o#pEu#p~Ay0}!FXPO~!DU0}!Fe]&TS&WW&R,XOY!`x#O!`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p#0Y#p~#=d2P#=mZQ1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#>gZQ1s&TSOY#=dYZ:{Z]#=d]^:{^r#=drs#?Ysw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#?aZQ1s&TSOY#=dYZ:{Z]#=d]^:{^r#=drs#,zsw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#@ZZQ1s&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@|x#O#=d#O#P#Av#P~#=d2P#ATZQ1s&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#9bx#O#=d#O#P#Av#P~#=d2P#A{TQ1sOY#=dYZ:{Z]#=d]^:{^~#=dLe#Bg_Q1s&^7[&WW&f#tOY!NdYZ(}Z]!Nd]^(}^r!Ndrs# 
rsw!Ndwx#Cfx#O!Nd#O#P#/f#P#o!Nd#o#p#wZ]#Cf]^>w^r#Cfrs#Djs#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#Dq]Q1s&^7[OY#CfYZ>wZ]#Cf]^>w^r#Cfrs#Ejs#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#Eq]Q1s&^7[OY#CfYZ>wZ]#Cf]^>w^r#Cfrs#'[s#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#FqXQ1s&^7[OY#CfYZ>wZ]#Cf]^>w^#o#Cf#o#p#5h#p#q#Cf#q#r#5h#r~#CfLu#GeXQ1s&^7[OY!KxYZ'PZ]!Kx]^'P^#o!Kx#o#p#HQ#p#q!Kx#q#r#HQ#r~!Kx6i#Ha]Q1s&TS&WW&Z`&d!b&f#tOY#HQYZAyZ]#HQ]^Ay^r#HQrs#IYsw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ6i#Ie]Q1s&TS&Z`&d!bOY#HQYZAyZ]#HQ]^Ay^r#HQrs#J^sw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ6i#Ji]Q1s&TS&Z`&d!bOY#HQYZAyZ]#HQ]^Ay^r#HQrs#Kbsw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ3k#KmZQ1s&TS&Z`&d!bOY#KbYZD_Z]#Kb]^D_^w#Kbwx#)|x#O#Kb#O#P#L`#P#o#Kb#o#p#Lt#p~#Kb3k#LeTQ1sOY#KbYZD_Z]#Kb]^D_^~#Kb3k#L{ZQ1s&TSOY#,zYZ1OZ]#,z]^1O^w#,zwx#-nx#O#,z#O#P#/Q#P#o#,z#o#p#Kb#p~#,z6i#MsTQ1sOY#HQYZAyZ]#HQ]^Ay^~#HQ6i#N]]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p#HQ#p~#=dLu$ c_Q1s&^7[&TS&Z`&d!bOY!KxYZ'PZ]!Kx]^'P^r!Kxrs$!bsw!Kxwx!MYx#O!Kx#O#P#G^#P#o!Kx#o#p#NS#p#q!Kx#q#r#HQ#r~!KxIw$!o]Q1s&^7[&TS&Z`&d!bOY$!bYZGgZ]$!b]^Gg^w$!bwx#%[x#O$!b#O#P$#h#P#o$!b#o#p#Lt#p#q$!b#q#r#Kb#r~$!bIw$#oXQ1s&^7[OY$!bYZGgZ]$!b]^Gg^#o$!b#o#p#Kb#p#q$!b#q#r#Kb#r~$!bMV$$i_Q1s&^7[&WW&ap&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$.px#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hMV$%y_Q1s&^7[&TS&WW&ap&d!b&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$$[x#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hMV$'PXQ1s&^7[OY$%hYZIqZ]$%h]^Iq^#o$%h#o#p$'l#p#q$%h#q#r$'l#r~$%h6y$'{]Q1s&TS&WW&ap&d!b&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$(tx#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l6y$)P]Q1s&WW&ap&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$)xx#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l6y$*T]Q1s&WW&ap&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$*|x#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l5c$+XZQ1s&WW&ap&f#tOY$*|YZMmZ]$*|]^Mm^r$*|rs#6ds#O$*|#O#P$+z#P#o$*|#o#p$,`#p~$*|5c$,PTQ1sOY$*|YZMmZ]$*|]^Mm^~$*|5c$,gZQ1s&WWOY#9bYZ8tZ]#9b]^8t^r#9brs#:Us#O#9b#O#P#;h#P#o#9b#o#p$*|#p~#9b6y$-_TQ1sOY$'lYZKXZ]$'l]^KX^~$'l6y$-w]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p$'l#p~#=dMV$.}_Q1s&^7[&WW&ap&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$/|x#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hKo$0Z]Q1s&^7[&WW&ap&f#tOY$/|YZ!!uZ]$/|]^!!u^r$/|rs#Djs#O$/|#O#P$1S#P#o$/|#o#p$,`#p#q$/|#q#r$*|#r~$/|Ko$1ZXQ1s&^7[OY$/|YZ!!uZ]$/|]^!!u^#o$/|#o#p$*|#p#q$/|#q#r$*|#r~$/|Mg$1}XQ1s&^7[OY!IYYZ$}Z]!IY]^$}^#o!IY#o#p$2j#p#q!IY#q#r$2j#r~!IY7Z$2{]Q1s&TS&WW&Z`&ap&d!b&f#tOY$2jYZ!$gZ]$2j]^!$g^r$2jrs#IYsw$2jwx$(tx#O$2j#O#P$3t#P#o$2j#o#p$4Y#p~$2j7Z$3yTQ1sOY$2jYZ!$gZ]$2j]^!$g^~$2j7Z$4c]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p$2j#p~#=dGz$5o]%jQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz$6{Z!s,W&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz$8R]%dQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{$9Z_&_`&^7[&WW&R,X&ap&f#tOY$:YYZIqZ]$:Y]^Iq^r$:Yrs$;jsw$:Ywx%%zx#O$:Y#O#P%!^#P#o$:Y#o#p%$x#p#q$:Y#q#r%!r#r~$:YGk$:k_&^7[&TS&WW&R,X&ap&d!b&f#tOY$:YYZIqZ]$:Y]^Iq^r$:Yrs$;jsw$:Ywx% 
^x#O$:Y#O#P%!^#P#o$:Y#o#p%$x#p#q$:Y#q#r%!r#r~$:YFy$;u_&^7[&TS&R,X&d!bOY$Sx#O$Sx#O$_Z&^7[&WW&R,X&f#tOr(}rs)}sw(}wx={x#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(}Fy$?VT&^7[O#o$Sx#O$T!Q!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz%>h]%kQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%?tu!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!O$}!O!P%BX!P!Q$}!Q![%Cc![!d$}!d!e%Ee!e!g$}!g!h%7Z!h!l$}!l!m%;k!m!q$}!q!r%H_!r!z$}!z!{%KR!{#O$}#O#P!$R#P#R$}#R#S%Cc#S#U$}#U#V%Ee#V#X$}#X#Y%7Z#Y#^$}#^#_%;k#_#c$}#c#d%H_#d#l$}#l#m%KR#m#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Bj]&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%5_![#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Cvi!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!O$}!O!P%BX!P!Q$}!Q![%Cc![!g$}!g!h%7Z!h!l$}!l!m%;k!m#O$}#O#P!$R#P#R$}#R#S%Cc#S#X$}#X#Y%7Z#Y#^$}#^#_%;k#_#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Ev`&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!R%Fx!R!S%Fx!S#O$}#O#P!$R#P#R$}#R#S%Fx#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%G]`!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!R%Fx!R!S%Fx!S#O$}#O#P!$R#P#R$}#R#S%Fx#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Hp_&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!Y%Io!Y#O$}#O#P!$R#P#R$}#R#S%Io#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%JS_!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!Y%Io!Y#O$}#O#P!$R#P#R$}#R#S%Io#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Kdc&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%Lo![!c$}!c!i%Lo!i#O$}#O#P!$R#P#R$}#R#S%Lo#S#T$}#T#Z%Lo#Z#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%MSc!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%Lo![!c$}!c!i%Lo!i#O$}#O#P!$R#P#R$}#R#S%Lo#S#T$}#T#Z%Lo#Z#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Mg%Nr]y1s&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`& k!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}x!u!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#f&+n#f#g&>x#g#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nGZ&9gZ&^7[&TS&Z`&d!b&`,XOr'Prs&:Ysw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PGZ&:eZ&^7[&TS&Z`&d!bOr'Prs&;Wsw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PD]&;eX&^7[&TS&e,X&Z`&d!bOwGgwx,kx#OGg#O#PH_#P#oGg#o#pET#p#qGg#q#rD_#r~GgGk&<_Z&^7[&WW&ap&f#t&Y,XOrIqrs)}swIqwx&=Qx#OIq#O#PJs#P#oIq#o#p! T#p#qIq#q#rKX#r~IqGk&=]Z&^7[&WW&ap&f#tOrIqrs)}swIqwx&>Ox#OIq#O#PJs#P#oIq#o#p! 
T#p#qIq#q#rKX#r~IqFT&>]X&^7[&WW&c,X&ap&f#tOr!!urs?ms#O!!u#O#P!#m#P#o!!u#o#pNc#p#q!!u#q#rMm#r~!!uMg&?_c&^7[&TS&WW&Q&j&Z`&ap&d!b&f#t%m,XOr$}rs&9Ysw$}wx&x!i!t&+n!t!u&5j!u!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#U&+n#U#V&5j#V#Y&+n#Y#Z&>x#Z#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nG{&CXZ!V,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}sO[O]||-1}],tokenPrec:7282});function I(O,$){let Q=O.lineIndent($.from),P=O.lineAt(O.pos,-1),e=P.from+P.text.length;return!/\S/.test(P.text)&&O.node.toQ?null:Q+O.unit}const aO=R.define({name:"python",parser:oO.configure({props:[Z.add({Body:O=>{var $;return($=I(O,O.node))!==null&&$!==void 0?$:O.continue()},IfStatement:O=>/^\s*(else:|elif )/.test(O.textAfter)?O.baseIndent:O.continue(),TryStatement:O=>/^\s*(except |finally:|else:)/.test(O.textAfter)?O.baseIndent:O.continue(),"TupleExpression ComprehensionExpression ParamList ArgList ParenthesizedExpression":a({closing:")"}),"DictionaryExpression DictionaryComprehensionExpression SetExpression SetComprehensionExpression":a({closing:"}"}),"ArrayExpression ArrayComprehensionExpression":a({closing:"]"}),"String FormatString":()=>null,Script:O=>{if(O.pos+/\s*/.exec(O.textAfter)[0].length>=O.node.to){let $=null;for(let Q=O.node,P=Q.to;Q=Q.lastChild,!(!Q||Q.to!=P);)Q.type.name=="Body"&&($=Q);if($){let Q=I(O,$);if(Q!=null)return Q}}return O.continue()}}),X.add({"ArrayExpression DictionaryExpression SetExpression TupleExpression":y,Body:(O,$)=>({from:O.from+1,to:O.to-(O.to==$.doc.length?0:1)})})]}),languageData:{closeBrackets:{brackets:["(","[","{","'",'"',"'''",'"""'],stringPrefixes:["f","fr","rf","r","u","b","br","rb","F","FR","RF","R","U","B","BR","RB"]},commentTokens:{line:"#"},indentOnInput:/^\s*([\}\]\)]|else:|elif |except |finally:)$/}});function nO(){return new f(aO)}export{nO as python,aO as pythonLanguage}; -//# sourceMappingURL=index-fa13b0d6.js.map diff --git a/spaces/wing-nus/SciAssist/description.py b/spaces/wing-nus/SciAssist/description.py deleted file mode 100644 index 6247c8feccab83d7461087ead4572115e34bba13..0000000000000000000000000000000000000000 --- a/spaces/wing-nus/SciAssist/description.py +++ /dev/null @@ -1,68 +0,0 @@ -# Reference string parsing Markdown -rsp_str_md = ''' -To **test on strings**, simply input one or more strings. -''' - -rsp_file_md = ''' -To **test on a file**, the input can be: - -- A txt file which contains a reference string in each line. - -- A pdf file which contains a whole scientific documention without any preprocessing(including title, author, body text...). - -''' -# - A pdf file which contains a whole scientific document without any processing (including title, author...). - -ssum_str_md = ''' -To **test on strings**, simply input a string. - -''' - -ssum_file_md = ''' -To **test on a file**, the input can be: - -- A txt file which contains the content to be summarized. - -- A pdf file which contains a whole scientific documention without any preprocessing(including title, author, body text...). - - -''' - -# - The **number of beams** should be **divisible** by the **number of generated summaries** for group beam search. -ctrlsum_str_md = ''' -To **test on strings**, simply input a string. - -**Note**: - -- Length 0 will exert no control over length. - - -''' - -ctrlsum_file_md = ''' -This is the demo for **CocoSciSum**. - -To **test on a file**, the input can be: - -- A txt file which contains the content to be summarized. 
- -- A pdf file which contains a whole scientific document without any preprocessing (including title, author, body text...). - - - -''' - - - -de_str_md = ''' -To **test on strings**, please input your sentences or paragraphs. -''' - -de_file_md = ''' -To **test on a file**, the input can be: - -- A txt file which contains the content from which to extract dataset mentions. - -- A pdf file which contains a whole scientific document without any preprocessing (including title, author, body text...). - -''' diff --git a/spaces/xiang-wuu/yolov5/export.py b/spaces/xiang-wuu/yolov5/export.py deleted file mode 100644 index 4846624541e47b920d5837ef1103e3f3e4155d1e..0000000000000000000000000000000000000000 --- a/spaces/xiang-wuu/yolov5/export.py +++ /dev/null @@ -1,618 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit - -Format | `export.py --include` | Model ---- | --- | --- -PyTorch | - | yolov5s.pt -TorchScript | `torchscript` | yolov5s.torchscript -ONNX | `onnx` | yolov5s.onnx -OpenVINO | `openvino` | yolov5s_openvino_model/ -TensorRT | `engine` | yolov5s.engine -CoreML | `coreml` | yolov5s.mlmodel -TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ -TensorFlow GraphDef | `pb` | yolov5s.pb -TensorFlow Lite | `tflite` | yolov5s.tflite -TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite -TensorFlow.js | `tfjs` | yolov5s_web_model/ - -Requirements: - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU - $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU - -Usage: - $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... - -Inference: - $ python path/to/detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - -TensorFlow.js: - $ cd ..
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example - $ npm install - $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model - $ npm start -""" - -import argparse -import json -import os -import platform -import subprocess -import sys -import time -import warnings -from pathlib import Path - -import pandas as pd -import torch -import yaml -from torch.utils.mobile_optimizer import optimize_for_mobile - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': - ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.experimental import attempt_load -from models.yolo import Detect -from utils.dataloaders import LoadImages -from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr, - file_size, print_args, url2file) -from utils.torch_utils import select_device - - -def export_formats(): - # YOLOv5 export formats - x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['CoreML', 'coreml', '.mlmodel', True, False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], - ['TensorFlow GraphDef', 'pb', '.pb', True, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False],] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) - - -def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): - # YOLOv5 TorchScript model export - try: - LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript') - - ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} - extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html - optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) - else: - ts.save(str(f), _extra_files=extra_files) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') - - -def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): - # YOLOv5 ONNX export - try: - check_requirements(('onnx',)) - import onnx - - LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') - f = file.with_suffix('.onnx') - - torch.onnx.export( - model.cpu() if dynamic else model, # --dynamic only compatible with cpu - im.cpu() if dynamic else im, - f, - verbose=False, - opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={ - 'images': { - 0: 'batch', - 2: 'height', - 3: 'width'}, # shape(1,3,640,640) - 'output': { - 0: 'batch', - 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - - # 
Metadata - d = {'stride': int(max(model.stride)), 'names': model.names} - for k, v in d.items(): - meta = model_onnx.metadata_props.add() - meta.key, meta.value = k, str(v) - onnx.save(model_onnx, f) - - # Simplify - if simplify: - try: - cuda = torch.cuda.is_available() - check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) - import onnxsim - - LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - LOGGER.info(f'{prefix} simplifier failure: {e}') - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') - - -def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): - # YOLOv5 OpenVINO export - try: - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', f'_openvino_model{os.sep}') - - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.check_output(cmd.split()) # export - with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g: - yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g) # add metadata.yaml - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): - # YOLOv5 CoreML export - try: - check_requirements(('coremltools',)) - import coremltools as ct - - LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') - f = file.with_suffix('.mlmodel') - - ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) - if bits < 32: - if platform.system() == 'Darwin': # quantization only supported on macOS - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning - ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - print(f'{prefix} quantization only supported on macOS, skipping...') - ct_model.save(f) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return ct_model, f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - return None, None - - -def export_engine(model, im, file, train, half, dynamic, simplify, workspace=4, verbose=False): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - prefix = colorstr('TensorRT:') - try: - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`' - try: - import tensorrt as trt - except Exception: - if platform.system() == 'Linux': - check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) - import tensorrt as trt - - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 - grid = model.model[-1].anchor_grid - model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, train, dynamic, simplify) # opset 12 - model.model[-1].anchor_grid = grid - else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 13, train, dynamic, simplify) # opset 13 - onnx = file.with_suffix('.onnx') - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - LOGGER.info(f'{prefix} Network Description:') - for inp in inputs: - LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - - if dynamic: - if im.shape[0] <= 1: - LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") - profile = builder.create_optimization_profile() - for inp in inputs: - profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) - config.add_optimization_profile(profile) - - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') - if builder.platform_has_fast_fp16 and half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_saved_model(model, - im, - file, - dynamic, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - keras=False, - prefix=colorstr('TensorFlow SavedModel:')): - # YOLOv5 TensorFlow SavedModel export - try: - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - from models.tf import TFDetect, TFModel - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = str(file).replace('.pt', '_saved_model') - batch_size, ch, *imgsz = list(im.shape) # BCHW - - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - im = tf.zeros((batch_size, *imgsz, ch)) 
# BHWC order for TensorFlow - _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) - outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) - keras_model.trainable = False - keras_model.summary() - if keras: - keras_model.save(f, save_format='tf') - else: - spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(spec) - frozen_func = convert_variables_to_constants_v2(m) - tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) - tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) - if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return keras_model, f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - return None, None - - -def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): - # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow - try: - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = file.with_suffix('.pb') - - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - frozen_func = convert_variables_to_constants_v2(m) - frozen_func.graph.as_graph_def() - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): - # YOLOv5 TensorFlow Lite export - try: - import tensorflow as tf - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - batch_size, ch, *imgsz = list(im.shape) # BCHW - f = str(file).replace('.pt', '-fp16.tflite') - - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.target_spec.supported_types = [tf.float16] - converter.optimizations = [tf.lite.Optimize.DEFAULT] - if int8: - from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data - converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.target_spec.supported_types = [] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - converter.experimental_new_quantizer = True - f = str(file).replace('.pt', '-int8.tflite') - if nms or agnostic_nms: - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - tflite_model = converter.convert() - open(f, 
"wb").write(tflite_model) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_edgetpu(file, prefix=colorstr('Edge TPU:')): - # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ - try: - cmd = 'edgetpu_compiler --version' - help_url = 'https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: - LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') - sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system - for c in ( - 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): - subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) - ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] - - LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model - f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - - cmd = f"edgetpu_compiler -s -o {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): - # YOLOv5 TensorFlow.js export - try: - check_requirements(('tensorflowjs',)) - import re - - import tensorflowjs as tfjs - - LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') - f = str(file).replace('.pt', '_web_model') # js dir - f_pb = file.with_suffix('.pb') # *.pb path - f_json = f'{f}/model.json' # *.json path - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) - - with open(f_json) as j: - json = j.read() - with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order - subst = re.sub( - r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' - r'"Identity_1": {"name": "Identity_1"}, ' - r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": "Identity_3"}}}', json) - j.write(subst) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - -@torch.no_grad() -def run( - data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=(640, 640), # image (height, width) - batch_size=1, # batch size - device='cpu', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - train=False, # model.train() mode - keras=False, # use Keras - optimize=False, # TorchScript: optimize for mobile - int8=False, # CoreML/TF INT8 quantization - dynamic=False, # ONNX/TF/TensorRT: dynamic axes - simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version - verbose=False, # TensorRT: verbose log - workspace=4, # TensorRT: workspace size (GB) - nms=False, # TF: add NMS to model - agnostic_nms=False, # TF: add agnostic NMS to model - topk_per_class=100, # TF.js NMS: topk per class to keep - topk_all=100, # TF.js NMS: topk for all classes to keep - iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25, # TF.js NMS: confidence threshold -): - t = time.time() - include = [x.lower() for x in include] # to lowercase - fmts = tuple(export_formats()['Argument'][1:]) # --include arguments - flags = [x in include for x in fmts] - assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans - file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights - - # Load PyTorch model - device = select_device(device) - if half: - assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' - assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' - model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model - nc, names = model.nc, model.names # number of classes, class names - - # Checks - imgsz *= 2 if len(imgsz) == 1 else 1 # expand - assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}' - if optimize: - assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' - - # Input - gs = int(max(model.stride)) # grid size (max stride) - imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples - im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection - - # Update model - model.train() if train else model.eval() # training mode = no Detect() layer grid construction - for k, m in model.named_modules(): - if isinstance(m, Detect): - m.inplace = inplace - m.onnx_dynamic = dynamic - m.export = True - - for _ in range(2): - y = model(im) # dry runs - if half and not coreml: - im, model = im.half(), model.half() # to FP16 - shape = tuple(y[0].shape) # model output shape - LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") - - # Exports - f = [''] * 10 # exported filenames - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - if jit: - f[0] = export_torchscript(model, im, file, optimize) - if engine: # TensorRT required before ONNX - f[1] = export_engine(model, im, file, train, half, dynamic, simplify, workspace, verbose) - if onnx or xml: # OpenVINO requires ONNX - f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify) - if xml: # OpenVINO - f[3] = export_openvino(model, file, half) - if coreml: - _, f[4] = export_coreml(model, im, file, int8, half) - - # TensorFlow Exports - if any((saved_model, pb, tflite, edgetpu, tfjs)): - if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 - check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` - assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' - model, f[5] = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) - if pb or tfjs: # pb prerequisite to tfjs - f[6] = export_pb(model, file) - if tflite or edgetpu: - f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8] = export_edgetpu(file) - if tfjs: - f[9] = export_tfjs(file) - - # Finish - f = [str(x) for x in f if x] # filter out '' and None - if any(f): - h = '--half' if half else '' # --half FP16 inference arg - LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python detect.py --weights {f[-1]} {h}" - f"\nValidate: python val.py --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" - f"\nVisualize: https://netron.app") - return f # return list of exported files/dirs - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--train', action='store_true', help='model.train() mode') - parser.add_argument('--keras', action='store_true', help='TF: use Keras') - parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') - parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') - parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') - parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') - parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') - parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') - parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') - parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') - parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') - parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument('--include', - nargs='+', - default=['torchscript', 'onnx'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') - opt = parser.parse_args() - print_args(vars(opt)) - return opt - - -def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/xiaoxuezi/spleeter/spleeter/dataset.py b/spaces/xiaoxuezi/spleeter/spleeter/dataset.py deleted file mode 100644 index b73e4143f8624cdd0ef29f0ce2b9cf8ddd5c2f80..0000000000000000000000000000000000000000 --- a/spaces/xiaoxuezi/spleeter/spleeter/dataset.py +++ /dev/null @@ -1,625 +0,0 @@ -#!/usr/bin/env python -# coding: utf8 - -""" - Module for building data preprocessing pipeline using the tensorflow - data API. 
Data preprocessing such as audio loading, spectrogram - computation, cropping, feature caching or data augmentation is done - using a tensorflow dataset object that output a tuple (input_, output) - where: - - - input is a dictionary with a single key that contains the (batched) - mix spectrogram of audio samples - - output is a dictionary of spectrogram of the isolated tracks - (ground truth) -""" - -import os -import time -from os.path import exists -from os.path import sep as SEPARATOR -from typing import Any, Dict, Optional - -# pyright: reportMissingImports=false -# pylint: disable=import-error -import tensorflow as tf - -from .audio.adapter import AudioAdapter -from .audio.convertor import db_uint_spectrogram_to_gain, spectrogram_to_db_uint -from .audio.spectrogram import ( - compute_spectrogram_tf, - random_pitch_shift, - random_time_stretch, -) -from .utils.logging import logger -from .utils.tensor import ( - check_tensor_shape, - dataset_from_csv, - set_tensor_shape, - sync_apply, -) - -# pylint: enable=import-error - -__email__ = "spleeter@deezer.com" -__author__ = "Deezer Research" -__license__ = "MIT License" - -# Default audio parameters to use. -DEFAULT_AUDIO_PARAMS: Dict = { - "instrument_list": ("vocals", "accompaniment"), - "mix_name": "mix", - "sample_rate": 44100, - "frame_length": 4096, - "frame_step": 1024, - "T": 512, - "F": 1024, -} - - -def get_training_dataset( - audio_params: Dict, audio_adapter: AudioAdapter, audio_path: str -) -> Any: - """ - Builds training dataset. - - Parameters: - audio_params (Dict): - Audio parameters. - audio_adapter (AudioAdapter): - Adapter to load audio from. - audio_path (str): - Path of directory containing audio. - - Returns: - Any: - Built dataset. - """ - builder = DatasetBuilder( - audio_params, - audio_adapter, - audio_path, - chunk_duration=audio_params.get("chunk_duration", 20.0), - random_seed=audio_params.get("random_seed", 0), - ) - return builder.build( - audio_params.get("train_csv"), - cache_directory=audio_params.get("training_cache"), - batch_size=audio_params.get("batch_size"), - n_chunks_per_song=audio_params.get("n_chunks_per_song", 2), - random_data_augmentation=False, - convert_to_uint=True, - wait_for_cache=False, - ) - - -def get_validation_dataset( - audio_params: Dict, audio_adapter: AudioAdapter, audio_path: str -) -> Any: - """ - Builds validation dataset. - - Parameters: - audio_params (Dict): - Audio parameters. - audio_adapter (AudioAdapter): - Adapter to load audio from. - audio_path (str): - Path of directory containing audio. - - Returns: - Any: - Built dataset. - """ - builder = DatasetBuilder( - audio_params, audio_adapter, audio_path, chunk_duration=12.0 - ) - return builder.build( - audio_params.get("validation_csv"), - batch_size=audio_params.get("batch_size"), - cache_directory=audio_params.get("validation_cache"), - convert_to_uint=True, - infinite_generator=False, - n_chunks_per_song=1, - # should not perform data augmentation for eval: - random_data_augmentation=False, - random_time_crop=False, - shuffle=False, - ) - - -class InstrumentDatasetBuilder(object): - """ Instrument based filter and mapper provider. """ - - def __init__(self, parent, instrument) -> None: - """ - Default constructor. - - Parameters: - parent: - Parent dataset builder. - instrument: - Target instrument. 
- """ - self._parent = parent - self._instrument = instrument - self._spectrogram_key = f"{instrument}_spectrogram" - self._min_spectrogram_key = f"min_{instrument}_spectrogram" - self._max_spectrogram_key = f"max_{instrument}_spectrogram" - - def load_waveform(self, sample): - """ Load waveform for given sample. """ - return dict( - sample, - **self._parent._audio_adapter.load_tf_waveform( - sample[f"{self._instrument}_path"], - offset=sample["start"], - duration=self._parent._chunk_duration, - sample_rate=self._parent._sample_rate, - waveform_name="waveform", - ), - ) - - def compute_spectrogram(self, sample): - """ Compute spectrogram of the given sample. """ - return dict( - sample, - **{ - self._spectrogram_key: compute_spectrogram_tf( - sample["waveform"], - frame_length=self._parent._frame_length, - frame_step=self._parent._frame_step, - spec_exponent=1.0, - window_exponent=1.0, - ) - }, - ) - - def filter_frequencies(self, sample): - """ """ - return dict( - sample, - **{ - self._spectrogram_key: sample[self._spectrogram_key][ - :, : self._parent._F, : - ] - }, - ) - - def convert_to_uint(self, sample): - """ Convert given sample from float to unit. """ - return dict( - sample, - **spectrogram_to_db_uint( - sample[self._spectrogram_key], - tensor_key=self._spectrogram_key, - min_key=self._min_spectrogram_key, - max_key=self._max_spectrogram_key, - ), - ) - - def filter_infinity(self, sample): - """ Filter infinity sample. """ - return tf.logical_not(tf.math.is_inf(sample[self._min_spectrogram_key])) - - def convert_to_float32(self, sample): - """ Convert given sample from unit to float. """ - return dict( - sample, - **{ - self._spectrogram_key: db_uint_spectrogram_to_gain( - sample[self._spectrogram_key], - sample[self._min_spectrogram_key], - sample[self._max_spectrogram_key], - ) - }, - ) - - def time_crop(self, sample): - """ """ - - def start(sample): - """ mid_segment_start """ - return tf.cast( - tf.maximum( - tf.shape(sample[self._spectrogram_key])[0] / 2 - - self._parent._T / 2, - 0, - ), - tf.int32, - ) - - return dict( - sample, - **{ - self._spectrogram_key: sample[self._spectrogram_key][ - start(sample) : start(sample) + self._parent._T, :, : - ] - }, - ) - - def filter_shape(self, sample): - """ Filter badly shaped sample. """ - return check_tensor_shape( - sample[self._spectrogram_key], - (self._parent._T, self._parent._F, self._parent._n_channels), - ) - - def reshape_spectrogram(self, sample): - """ Reshape given sample. """ - return dict( - sample, - **{ - self._spectrogram_key: set_tensor_shape( - sample[self._spectrogram_key], - (self._parent._T, self._parent._F, self._parent._n_channels), - ) - }, - ) - - -class DatasetBuilder(object): - """ - TO BE DOCUMENTED. - """ - - MARGIN: float = 0.5 - """ Margin at beginning and end of songs in seconds. """ - - WAIT_PERIOD: int = 60 - """ Wait period for cache (in seconds). """ - - def __init__( - self, - audio_params: Dict, - audio_adapter: AudioAdapter, - audio_path: str, - random_seed: int = 0, - chunk_duration: float = 20.0, - ) -> None: - """ - Default constructor. - - NOTE: Probably need for AudioAdapter. - - Parameters: - audio_params (Dict): - Audio parameters to use. - audio_adapter (AudioAdapter): - Audio adapter to use. 
-
-
-class DatasetBuilder(object):
-    """
-    Builds the train and validation dataset pipelines.
-    """
-
-    MARGIN: float = 0.5
-    """ Margin at beginning and end of songs in seconds. """
-
-    WAIT_PERIOD: int = 60
-    """ Wait period for cache (in seconds). """
-
-    def __init__(
-        self,
-        audio_params: Dict,
-        audio_adapter: AudioAdapter,
-        audio_path: str,
-        random_seed: int = 0,
-        chunk_duration: float = 20.0,
-    ) -> None:
-        """
-        Default constructor.
-
-        NOTE: Probably need for AudioAdapter.
-
-        Parameters:
-            audio_params (Dict):
-                Audio parameters to use.
-            audio_adapter (AudioAdapter):
-                Audio adapter to use.
-            audio_path (str):
-                Path of directory containing audio.
-            random_seed (int):
-                Seed used for random operations.
-            chunk_duration (float):
-                Duration of a chunk in seconds.
-        """
-        # Length of segment in frames (if fs=22050 and
-        # frame_step=512, then T=512 corresponds to 11.89s)
-        self._T = audio_params["T"]
-        # Number of frequency bins to be used (should
-        # be less than frame_length/2 + 1)
-        self._F = audio_params["F"]
-        self._sample_rate = audio_params["sample_rate"]
-        self._frame_length = audio_params["frame_length"]
-        self._frame_step = audio_params["frame_step"]
-        self._mix_name = audio_params["mix_name"]
-        self._n_channels = audio_params["n_channels"]
-        self._instruments = [self._mix_name] + audio_params["instrument_list"]
-        self._instrument_builders = None
-        self._chunk_duration = chunk_duration
-        self._audio_adapter = audio_adapter
-        self._audio_params = audio_params
-        self._audio_path = audio_path
-        self._random_seed = random_seed
-
-        self.check_parameters_compatibility()
-
-    def check_parameters_compatibility(self):
-        if self._frame_length / 2 + 1 < self._F:
-            raise ValueError(
-                "F is too large and must be set to at most frame_length/2+1. Decrease F or increase frame_length to fix."
-            )
-
-        if (
-            self._chunk_duration * self._sample_rate - self._frame_length
-        ) / self._frame_step < self._T:
-            raise ValueError(
-                "T is too large considering STFT parameters and chunk duration. Make sure the spectrogram time dimension of chunks is larger than T (for instance by reducing T or frame_step, or by increasing the chunk duration)."
-            )
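To make the two constraints above concrete, here is the same arithmetic with the module's `DEFAULT_AUDIO_PARAMS` and the 20 s training chunk duration used by `get_training_dataset`:

```python
sample_rate, frame_length, frame_step = 44100, 4096, 1024
T, F, chunk_duration = 512, 1024, 20.0

assert frame_length / 2 + 1 >= F       # 2049 >= 1024: F fits in the STFT bins
n_frames = (chunk_duration * sample_rate - frame_length) / frame_step
assert n_frames >= T                   # ~857.3 >= 512: chunks are long enough
```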
""" - return dict( - sample, - **sync_apply( - { - f"{instrument}_spectrogram": sample[f"{instrument}_spectrogram"] - for instrument in self._instruments - }, - lambda x: random_pitch_shift(x, shift_min=-1.0, shift_max=1.0), - concat_axis=0, - ), - ) - - def map_features(self, sample): - """ Select features and annotation of the given sample. """ - input_ = { - f"{self._mix_name}_spectrogram": sample[f"{self._mix_name}_spectrogram"] - } - output = { - f"{instrument}_spectrogram": sample[f"{instrument}_spectrogram"] - for instrument in self._audio_params["instrument_list"] - } - return (input_, output) - - def compute_segments(self, dataset: Any, n_chunks_per_song: int) -> Any: - """ - Computes segments for each song of the dataset. - - Parameters: - dataset (Any): - Dataset to compute segments for. - n_chunks_per_song (int): - Number of segment per song to compute. - - Returns: - Any: - Segmented dataset. - """ - if n_chunks_per_song <= 0: - raise ValueError("n_chunks_per_song must be positif") - datasets = [] - for k in range(n_chunks_per_song): - if n_chunks_per_song > 1: - datasets.append( - dataset.map( - lambda sample: dict( - sample, - start=tf.maximum( - k - * ( - sample["duration"] - - self._chunk_duration - - 2 * self.MARGIN - ) - / (n_chunks_per_song - 1) - + self.MARGIN, - 0, - ), - ) - ) - ) - elif n_chunks_per_song == 1: # Take central segment. - datasets.append( - dataset.map( - lambda sample: dict( - sample, - start=tf.maximum( - sample["duration"] / 2 - self._chunk_duration / 2, 0 - ), - ) - ) - ) - dataset = datasets[-1] - for d in datasets[:-1]: - dataset = dataset.concatenate(d) - return dataset - - @property - def instruments(self) -> Any: - """ - Instrument dataset builder generator. - - Yields: - Any: - InstrumentBuilder instance. - """ - if self._instrument_builders is None: - self._instrument_builders = [] - for instrument in self._instruments: - self._instrument_builders.append( - InstrumentDatasetBuilder(self, instrument) - ) - for builder in self._instrument_builders: - yield builder - - def cache(self, dataset: Any, cache: str, wait: bool) -> Any: - """ - Cache the given dataset if cache is enabled. Eventually waits for - cache to be available (useful if another process is already - computing cache) if provided wait flag is `True`. - - Parameters: - dataset (Any): - Dataset to be cached if cache is required. - cache (str): - Path of cache directory to be used, None if no cache. - wait (bool): - If caching is enabled, True is cache should be waited. - - Returns: - Any: - Cached dataset if needed, original dataset otherwise. - """ - if cache is not None: - if wait: - while not exists(f"{cache}.index"): - logger.info(f"Cache not available, wait {self.WAIT_PERIOD}") - time.sleep(self.WAIT_PERIOD) - cache_path = os.path.split(cache)[0] - os.makedirs(cache_path, exist_ok=True) - return dataset.cache(cache) - return dataset - - def build( - self, - csv_path: str, - batch_size: int = 8, - shuffle: bool = True, - convert_to_uint: bool = True, - random_data_augmentation: bool = False, - random_time_crop: bool = True, - infinite_generator: bool = True, - cache_directory: Optional[str] = None, - wait_for_cache: bool = False, - num_parallel_calls: int = 4, - n_chunks_per_song: float = 2, - ) -> Any: - """ - TO BE DOCUMENTED. 
- """ - dataset = dataset_from_csv(csv_path) - dataset = self.compute_segments(dataset, n_chunks_per_song) - # Shuffle data - if shuffle: - dataset = dataset.shuffle( - buffer_size=200000, - seed=self._random_seed, - # useless since it is cached : - reshuffle_each_iteration=True, - ) - # Expand audio path. - dataset = dataset.map(self.expand_path) - # Load waveform, compute spectrogram, and filtering error, - # K bins frequencies, and waveform. - N = num_parallel_calls - for instrument in self.instruments: - dataset = ( - dataset.map(instrument.load_waveform, num_parallel_calls=N) - .filter(self.filter_error) - .map(instrument.compute_spectrogram, num_parallel_calls=N) - .map(instrument.filter_frequencies) - ) - dataset = dataset.map(self.filter_waveform) - # Convert to uint before caching in order to save space. - if convert_to_uint: - for instrument in self.instruments: - dataset = dataset.map(instrument.convert_to_uint) - dataset = self.cache(dataset, cache_directory, wait_for_cache) - # Check for INFINITY (should not happen) - for instrument in self.instruments: - dataset = dataset.filter(instrument.filter_infinity) - # Repeat indefinitly - if infinite_generator: - dataset = dataset.repeat(count=-1) - # Ensure same size for vocals and mix spectrograms. - # NOTE: could be done before caching ? - dataset = dataset.map(self.harmonize_spectrogram) - # Filter out too short segment. - # NOTE: could be done before caching ? - dataset = dataset.filter(self.filter_short_segments) - # Random time crop of 11.88s - if random_time_crop: - dataset = dataset.map(self.random_time_crop, num_parallel_calls=N) - else: - # frame_duration = 11.88/T - # take central segment (for validation) - for instrument in self.instruments: - dataset = dataset.map(instrument.time_crop) - # Post cache shuffling. Done where the data are the lightest: - # after croping but before converting back to float. - if shuffle: - dataset = dataset.shuffle( - buffer_size=256, seed=self._random_seed, reshuffle_each_iteration=True - ) - # Convert back to float32 - if convert_to_uint: - for instrument in self.instruments: - dataset = dataset.map( - instrument.convert_to_float32, num_parallel_calls=N - ) - M = 8 # Parallel call post caching. - # Must be applied with the same factor on mix and vocals. - if random_data_augmentation: - dataset = dataset.map(self.random_time_stretch, num_parallel_calls=M).map( - self.random_pitch_shift, num_parallel_calls=M - ) - # Filter by shape (remove badly shaped tensors). - for instrument in self.instruments: - dataset = dataset.filter(instrument.filter_shape).map( - instrument.reshape_spectrogram - ) - # Select features and annotation. - dataset = dataset.map(self.map_features) - # Make batch (done after selection to avoid - # error due to unprocessed instrument spectrogram batching). 
diff --git a/spaces/xuetao/bingo3/src/components/ui/tooltip.tsx b/spaces/xuetao/bingo3/src/components/ui/tooltip.tsx
deleted file mode 100644
index af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000
--- a/spaces/xuetao/bingo3/src/components/ui/tooltip.tsx
+++ /dev/null
@@ -1,30 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import * as TooltipPrimitive from '@radix-ui/react-tooltip'
-
-import { cn } from '@/lib/utils'
-
-const TooltipProvider = TooltipPrimitive.Provider
-
-const Tooltip = TooltipPrimitive.Root
-
-const TooltipTrigger = TooltipPrimitive.Trigger
-
-const TooltipContent = React.forwardRef<
-  React.ElementRef<typeof TooltipPrimitive.Content>,
-  React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
->(({ className, sideOffset = 4, ...props }, ref) => (
-  <TooltipPrimitive.Content
-    ref={ref}
-    sideOffset={sideOffset}
-    className={cn(className)}
-    {...props}
-  />
-))
-TooltipContent.displayName = TooltipPrimitive.Content.displayName
-
-export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
diff --git a/spaces/yangheng/Waifu2X-Image-Scale/Waifu2x/Models.py b/spaces/yangheng/Waifu2X-Image-Scale/Waifu2x/Models.py
deleted file mode 100644
index 7e1d861f4908fbcb3912ff2f75d185ee33b39eb7..0000000000000000000000000000000000000000
--- a/spaces/yangheng/Waifu2X-Image-Scale/Waifu2x/Models.py
+++ /dev/null
@@ -1,316 +0,0 @@
-import json
-from collections import OrderedDict
-from math import exp
-
-from .Common import *
-
-
-# +++++++++++++++++++++++++++++++++++++
-#           FP16 Training
-# -------------------------------------
-# Modified from Nvidia/Apex
-# https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/fp16util.py
-
-class tofp16(nn.Module):
-    def __init__(self):
-        super(tofp16, self).__init__()
-
-    def forward(self, input):
-        if input.is_cuda:
-            return input.half()
-        else:  # PyTorch 1.0 doesn't support fp16 on CPU
-            return input.float()
-
-
-def BN_convert_float(module):
-    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
-        module.float()
-    for child in module.children():
-        BN_convert_float(child)
-    return module
-
-
-def network_to_half(network):
-    return nn.Sequential(tofp16(), BN_convert_float(network.half()))
-
-
-# warnings.simplefilter('ignore')
-
-# +++++++++++++++++++++++++++++++++++++
-#           DCSCN
-# -------------------------------------
-
-class DCSCN(BaseModule):
-    # https://github.com/jiny2001/dcscn-super-resolution
-    def __init__(self,
-                 color_channel=3,
-                 up_scale=2,
-                 feature_layers=12,
-                 first_feature_filters=196,
-                 last_feature_filters=48,
-                 reconstruction_filters=128,
-                 up_sampler_filters=32
-                 ):
-        super(DCSCN, self).__init__()
-        self.total_feature_channels = 0
-        self.total_reconstruct_filters = 0
-        self.upscale = up_scale
-
-        self.act_fn = nn.SELU(inplace=False)
-        self.feature_block = self.make_feature_extraction_block(color_channel,
-                                                                feature_layers,
-                                                                first_feature_filters,
-                                                                last_feature_filters)
-
-        self.reconstruction_block = self.make_reconstruction_block(reconstruction_filters)
-        self.up_sampler = self.make_upsampler(up_sampler_filters, color_channel)
-        self.selu_init_params()
-
-    def selu_init_params(self):
-        for i in self.modules():
-            if isinstance(i, nn.Conv2d):
-                i.weight.data.normal_(0.0, 1.0 / sqrt(i.weight.numel()))
-                if i.bias is not None:
-                    i.bias.data.fill_(0)
-
-    def conv_block(self, in_channel, out_channel, kernel_size):
-        m = OrderedDict([
-            # ("Padding", nn.ReplicationPad2d((kernel_size - 1) // 2)),
-            ('Conv2d', nn.Conv2d(in_channel, out_channel, kernel_size=kernel_size, padding=(kernel_size - 1) // 2)),
-            ('Activation', self.act_fn)
-        ])
-
-        return nn.Sequential(m)
-
-    def
make_feature_extraction_block(self, color_channel, num_layers, first_filters, last_filters): - # input layer - feature_block = [("Feature 1", self.conv_block(color_channel, first_filters, 3))] - # exponential decay - # rest layers - alpha_rate = log(first_filters / last_filters) / (num_layers - 1) - filter_nums = [round(first_filters * exp(-alpha_rate * i)) for i in range(num_layers)] - - self.total_feature_channels = sum(filter_nums) - - layer_filters = [[filter_nums[i], filter_nums[i + 1], 3] for i in range(num_layers - 1)] - - feature_block.extend([("Feature {}".format(index + 2), self.conv_block(*x)) - for index, x in enumerate(layer_filters)]) - return nn.Sequential(OrderedDict(feature_block)) - - def make_reconstruction_block(self, num_filters): - B1 = self.conv_block(self.total_feature_channels, num_filters // 2, 1) - B2 = self.conv_block(num_filters // 2, num_filters, 3) - m = OrderedDict([ - ("A", self.conv_block(self.total_feature_channels, num_filters, 1)), - ("B", nn.Sequential(*[B1, B2])) - ]) - self.total_reconstruct_filters = num_filters * 2 - return nn.Sequential(m) - - def make_upsampler(self, out_channel, color_channel): - out = out_channel * self.upscale ** 2 - m = OrderedDict([ - ('Conv2d_block', self.conv_block(self.total_reconstruct_filters, out, kernel_size=3)), - ('PixelShuffle', nn.PixelShuffle(self.upscale)), - ("Conv2d", nn.Conv2d(out_channel, color_channel, kernel_size=3, padding=1, bias=False)) - ]) - - return nn.Sequential(m) - - def forward(self, x): - # residual learning - lr, lr_up = x - feature = [] - for layer in self.feature_block.children(): - lr = layer(lr) - feature.append(lr) - feature = torch.cat(feature, dim=1) - - reconstruction = [layer(feature) for layer in self.reconstruction_block.children()] - reconstruction = torch.cat(reconstruction, dim=1) - - lr = self.up_sampler(reconstruction) - return lr + lr_up - - -# +++++++++++++++++++++++++++++++++++++ -# CARN -# ------------------------------------- - -class CARN_Block(BaseModule): - def __init__(self, channels, kernel_size=3, padding=1, dilation=1, - groups=1, activation=nn.SELU(), repeat=3, - SEBlock=False, conv=nn.Conv2d, - single_conv_size=1, single_conv_group=1): - super(CARN_Block, self).__init__() - m = [] - for i in range(repeat): - m.append(ResidualFixBlock(channels, channels, kernel_size=kernel_size, padding=padding, dilation=dilation, - groups=groups, activation=activation, conv=conv)) - if SEBlock: - m.append(SpatialChannelSqueezeExcitation(channels, reduction=channels)) - self.blocks = nn.Sequential(*m) - self.singles = nn.Sequential( - *[ConvBlock(channels * (i + 2), channels, kernel_size=single_conv_size, - padding=(single_conv_size - 1) // 2, groups=single_conv_group, - activation=activation, conv=conv) - for i in range(repeat)]) - - def forward(self, x): - c0 = x - for block, single in zip(self.blocks, self.singles): - b = block(x) - c0 = c = torch.cat([c0, b], dim=1) - x = single(c) - - return x - - -class CARN(BaseModule): - # Fast, Accurate, and Lightweight Super-Resolution with Cascading Residual Network - # https://github.com/nmhkahn/CARN-pytorch - def __init__(self, - color_channels=3, - mid_channels=64, - scale=2, - activation=nn.SELU(), - num_blocks=3, - conv=nn.Conv2d): - super(CARN, self).__init__() - - self.color_channels = color_channels - self.mid_channels = mid_channels - self.scale = scale - - self.entry_block = ConvBlock(color_channels, mid_channels, kernel_size=3, padding=1, activation=activation, - conv=conv) - self.blocks = nn.Sequential( - 
*[CARN_Block(mid_channels, kernel_size=3, padding=1, activation=activation, conv=conv, - single_conv_size=1, single_conv_group=1) - for _ in range(num_blocks)]) - self.singles = nn.Sequential( - *[ConvBlock(mid_channels * (i + 2), mid_channels, kernel_size=1, padding=0, - activation=activation, conv=conv) - for i in range(num_blocks)]) - - self.upsampler = UpSampleBlock(mid_channels, scale=scale, activation=activation, conv=conv) - self.exit_conv = conv(mid_channels, color_channels, kernel_size=3, padding=1) - - def forward(self, x): - x = self.entry_block(x) - c0 = x - for block, single in zip(self.blocks, self.singles): - b = block(x) - c0 = c = torch.cat([c0, b], dim=1) - x = single(c) - x = self.upsampler(x) - out = self.exit_conv(x) - return out - - -class CARN_V2(CARN): - def __init__(self, color_channels=3, mid_channels=64, - scale=2, activation=nn.LeakyReLU(0.1), - SEBlock=True, conv=nn.Conv2d, - atrous=(1, 1, 1), repeat_blocks=3, - single_conv_size=3, single_conv_group=1): - super(CARN_V2, self).__init__(color_channels=color_channels, mid_channels=mid_channels, scale=scale, - activation=activation, conv=conv) - - num_blocks = len(atrous) - m = [] - for i in range(num_blocks): - m.append(CARN_Block(mid_channels, kernel_size=3, padding=1, dilation=1, - activation=activation, SEBlock=SEBlock, conv=conv, repeat=repeat_blocks, - single_conv_size=single_conv_size, single_conv_group=single_conv_group)) - - self.blocks = nn.Sequential(*m) - - self.singles = nn.Sequential( - *[ConvBlock(mid_channels * (i + 2), mid_channels, kernel_size=single_conv_size, - padding=(single_conv_size - 1) // 2, groups=single_conv_group, - activation=activation, conv=conv) - for i in range(num_blocks)]) - - def forward(self, x): - x = self.entry_block(x) - c0 = x - res = x - for block, single in zip(self.blocks, self.singles): - b = block(x) - c0 = c = torch.cat([c0, b], dim=1) - x = single(c) - x = x + res - x = self.upsampler(x) - out = self.exit_conv(x) - return out - - -# +++++++++++++++++++++++++++++++++++++ -# original Waifu2x model -# ------------------------------------- - - -class UpConv_7(BaseModule): - # https://github.com/nagadomi/waifu2x/blob/3c46906cb78895dbd5a25c3705994a1b2e873199/lib/srcnn.lua#L311 - def __init__(self): - super(UpConv_7, self).__init__() - self.act_fn = nn.LeakyReLU(0.1, inplace=False) - self.offset = 7 # because of 0 padding - from torch.nn import ZeroPad2d - self.pad = ZeroPad2d(self.offset) - m = [nn.Conv2d(3, 16, 3, 1, 0), - self.act_fn, - nn.Conv2d(16, 32, 3, 1, 0), - self.act_fn, - nn.Conv2d(32, 64, 3, 1, 0), - self.act_fn, - nn.Conv2d(64, 128, 3, 1, 0), - self.act_fn, - nn.Conv2d(128, 128, 3, 1, 0), - self.act_fn, - nn.Conv2d(128, 256, 3, 1, 0), - self.act_fn, - # in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding= - nn.ConvTranspose2d(256, 3, kernel_size=4, stride=2, padding=3, bias=False) - ] - self.Sequential = nn.Sequential(*m) - - def load_pre_train_weights(self, json_file): - with open(json_file) as f: - weights = json.load(f) - box = [] - for i in weights: - box.append(i['weight']) - box.append(i['bias']) - own_state = self.state_dict() - for index, (name, param) in enumerate(own_state.items()): - own_state[name].copy_(torch.FloatTensor(box[index])) - - def forward(self, x): - x = self.pad(x) - return self.Sequential.forward(x) - - - -class Vgg_7(UpConv_7): - def __init__(self): - super(Vgg_7, self).__init__() - self.act_fn = nn.LeakyReLU(0.1, inplace=False) - self.offset = 7 - m = [nn.Conv2d(3, 32, 3, 1, 0), - self.act_fn, - nn.Conv2d(32, 
32, 3, 1, 0), - self.act_fn, - nn.Conv2d(32, 64, 3, 1, 0), - self.act_fn, - nn.Conv2d(64, 64, 3, 1, 0), - self.act_fn, - nn.Conv2d(64, 128, 3, 1, 0), - self.act_fn, - nn.Conv2d(128, 128, 3, 1, 0), - self.act_fn, - nn.Conv2d(128, 3, 3, 1, 0) - ] - self.Sequential = nn.Sequential(*m) diff --git a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/generate_figures.py b/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/generate_figures.py deleted file mode 100644 index 45b68b86146198c701a66fb8ba7a363d901d6951..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/generate_figures.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Minimal script for reproducing the figures of the StyleGAN paper using pre-trained generators.""" - -import os -import pickle -import numpy as np -import PIL.Image -import dnnlib -import dnnlib.tflib as tflib -import config - -#---------------------------------------------------------------------------- -# Helpers for loading and using pre-trained generators. - -url_ffhq = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl -url_celebahq = 'https://drive.google.com/uc?id=1MGqJl28pN4t7SAtSrPdSRJSQJqahkzUf' # karras2019stylegan-celebahq-1024x1024.pkl -url_bedrooms = 'https://drive.google.com/uc?id=1MOSKeGF0FJcivpBI7s63V9YHloUTORiF' # karras2019stylegan-bedrooms-256x256.pkl -url_cars = 'https://drive.google.com/uc?id=1MJ6iCfNtMIRicihwRorsM3b7mmtmK9c3' # karras2019stylegan-cars-512x384.pkl -url_cats = 'https://drive.google.com/uc?id=1MQywl0FNt6lHu8E_EUqnRbviagS7fbiJ' # karras2019stylegan-cats-256x256.pkl - -synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8) - -_Gs_cache = dict() - -def load_Gs(url): - if url not in _Gs_cache: - with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f: - _G, _D, Gs = pickle.load(f) - _Gs_cache[url] = Gs - return _Gs_cache[url] - -#---------------------------------------------------------------------------- -# Figures 2, 3, 10, 11, 12: Multi-resolution grid of uncurated result images. - -def draw_uncurated_result_figure(png, Gs, cx, cy, cw, ch, rows, lods, seed): - print(png) - latents = np.random.RandomState(seed).randn(sum(rows * 2**lod for lod in lods), Gs.input_shape[1]) - images = Gs.run(latents, None, **synthesis_kwargs) # [seed, y, x, rgb] - - canvas = PIL.Image.new('RGB', (sum(cw // 2**lod for lod in lods), ch * rows), 'white') - image_iter = iter(list(images)) - for col, lod in enumerate(lods): - for row in range(rows * 2**lod): - image = PIL.Image.fromarray(next(image_iter), 'RGB') - image = image.crop((cx, cy, cx + cw, cy + ch)) - image = image.resize((cw // 2**lod, ch // 2**lod), PIL.Image.ANTIALIAS) - canvas.paste(image, (sum(cw // 2**lod for lod in lods[:col]), row * ch // 2**lod)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Figure 3: Style mixing. 
- -def draw_style_mixing_figure(png, Gs, w, h, src_seeds, dst_seeds, style_ranges): - print(png) - src_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in src_seeds) - dst_latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in dst_seeds) - src_dlatents = Gs.components.mapping.run(src_latents, None) # [seed, layer, component] - dst_dlatents = Gs.components.mapping.run(dst_latents, None) # [seed, layer, component] - src_images = Gs.components.synthesis.run(src_dlatents, randomize_noise=False, **synthesis_kwargs) - dst_images = Gs.components.synthesis.run(dst_dlatents, randomize_noise=False, **synthesis_kwargs) - - canvas = PIL.Image.new('RGB', (w * (len(src_seeds) + 1), h * (len(dst_seeds) + 1)), 'white') - for col, src_image in enumerate(list(src_images)): - canvas.paste(PIL.Image.fromarray(src_image, 'RGB'), ((col + 1) * w, 0)) - for row, dst_image in enumerate(list(dst_images)): - canvas.paste(PIL.Image.fromarray(dst_image, 'RGB'), (0, (row + 1) * h)) - row_dlatents = np.stack([dst_dlatents[row]] * len(src_seeds)) - row_dlatents[:, style_ranges[row]] = src_dlatents[:, style_ranges[row]] - row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs) - for col, image in enumerate(list(row_images)): - canvas.paste(PIL.Image.fromarray(image, 'RGB'), ((col + 1) * w, (row + 1) * h)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Figure 4: Noise detail. - -def draw_noise_detail_figure(png, Gs, w, h, num_samples, seeds): - print(png) - canvas = PIL.Image.new('RGB', (w * 3, h * len(seeds)), 'white') - for row, seed in enumerate(seeds): - latents = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1])] * num_samples) - images = Gs.run(latents, None, truncation_psi=1, **synthesis_kwargs) - canvas.paste(PIL.Image.fromarray(images[0], 'RGB'), (0, row * h)) - for i in range(4): - crop = PIL.Image.fromarray(images[i + 1], 'RGB') - crop = crop.crop((650, 180, 906, 436)) - crop = crop.resize((w//2, h//2), PIL.Image.NEAREST) - canvas.paste(crop, (w + (i%2) * w//2, row * h + (i//2) * h//2)) - diff = np.std(np.mean(images, axis=3), axis=0) * 4 - diff = np.clip(diff + 0.5, 0, 255).astype(np.uint8) - canvas.paste(PIL.Image.fromarray(diff, 'L'), (w * 2, row * h)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Figure 5: Noise components. - -def draw_noise_components_figure(png, Gs, w, h, seeds, noise_ranges, flips): - print(png) - Gsc = Gs.clone() - noise_vars = [var for name, var in Gsc.components.synthesis.vars.items() if name.startswith('noise')] - noise_pairs = list(zip(noise_vars, tflib.run(noise_vars))) # [(var, val), ...] 
- latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds) - all_images = [] - for noise_range in noise_ranges: - tflib.set_vars({var: val * (1 if i in noise_range else 0) for i, (var, val) in enumerate(noise_pairs)}) - range_images = Gsc.run(latents, None, truncation_psi=1, randomize_noise=False, **synthesis_kwargs) - range_images[flips, :, :] = range_images[flips, :, ::-1] - all_images.append(list(range_images)) - - canvas = PIL.Image.new('RGB', (w * 2, h * 2), 'white') - for col, col_images in enumerate(zip(*all_images)): - canvas.paste(PIL.Image.fromarray(col_images[0], 'RGB').crop((0, 0, w//2, h)), (col * w, 0)) - canvas.paste(PIL.Image.fromarray(col_images[1], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, 0)) - canvas.paste(PIL.Image.fromarray(col_images[2], 'RGB').crop((0, 0, w//2, h)), (col * w, h)) - canvas.paste(PIL.Image.fromarray(col_images[3], 'RGB').crop((w//2, 0, w, h)), (col * w + w//2, h)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Figure 8: Truncation trick. - -def draw_truncation_trick_figure(png, Gs, w, h, seeds, psis): - print(png) - latents = np.stack(np.random.RandomState(seed).randn(Gs.input_shape[1]) for seed in seeds) - dlatents = Gs.components.mapping.run(latents, None) # [seed, layer, component] - dlatent_avg = Gs.get_var('dlatent_avg') # [component] - - canvas = PIL.Image.new('RGB', (w * len(psis), h * len(seeds)), 'white') - for row, dlatent in enumerate(list(dlatents)): - row_dlatents = (dlatent[np.newaxis] - dlatent_avg) * np.reshape(psis, [-1, 1, 1]) + dlatent_avg - row_images = Gs.components.synthesis.run(row_dlatents, randomize_noise=False, **synthesis_kwargs) - for col, image in enumerate(list(row_images)): - canvas.paste(PIL.Image.fromarray(image, 'RGB'), (col * w, row * h)) - canvas.save(png) - -#---------------------------------------------------------------------------- -# Main program. 
- -def main(): - tflib.init_tf() - os.makedirs(config.result_dir, exist_ok=True) - draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure02-uncurated-ffhq.png'), load_Gs(url_ffhq), cx=0, cy=0, cw=1024, ch=1024, rows=3, lods=[0,1,2,2,3,3], seed=5) - draw_style_mixing_figure(os.path.join(config.result_dir, 'figure03-style-mixing.png'), load_Gs(url_ffhq), w=1024, h=1024, src_seeds=[639,701,687,615,2268], dst_seeds=[888,829,1898,1733,1614,845], style_ranges=[range(0,4)]*3+[range(4,8)]*2+[range(8,18)]) - draw_noise_detail_figure(os.path.join(config.result_dir, 'figure04-noise-detail.png'), load_Gs(url_ffhq), w=1024, h=1024, num_samples=100, seeds=[1157,1012]) - draw_noise_components_figure(os.path.join(config.result_dir, 'figure05-noise-components.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[1967,1555], noise_ranges=[range(0, 18), range(0, 0), range(8, 18), range(0, 8)], flips=[1]) - draw_truncation_trick_figure(os.path.join(config.result_dir, 'figure08-truncation-trick.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[91,388], psis=[1, 0.7, 0.5, 0, -0.5, -1]) - draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure10-uncurated-bedrooms.png'), load_Gs(url_bedrooms), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=0) - draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure11-uncurated-cars.png'), load_Gs(url_cars), cx=0, cy=64, cw=512, ch=384, rows=4, lods=[0,1,2,2,3,3], seed=2) - draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure12-uncurated-cats.png'), load_Gs(url_cats), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=1) - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - main() - -#---------------------------------------------------------------------------- diff --git a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/training/__init__.py b/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/training/__init__.py deleted file mode 100644 index db8124b132f91216c0ded226f20ea3a046734728..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/training/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
- -# empty diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/services/SoundFontSynth.ts b/spaces/yderre-aubay/midi-player-demo/src/main/services/SoundFontSynth.ts deleted file mode 100644 index 1d4ba035001def31e9b2494ef5db2a1c5d948b2a..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/services/SoundFontSynth.ts +++ /dev/null @@ -1,71 +0,0 @@ -import { getSamplesFromSoundFont, SynthEvent } from "@ryohey/wavelet" -import { makeObservable, observable } from "mobx" -import { SendableEvent, SynthOutput } from "./SynthOutput" - -export class SoundFontSynth implements SynthOutput { - private synth: AudioWorkletNode | null = null - private soundFontURL: string - private context = new (window.AudioContext || window.webkitAudioContext)() - - private _loadedSoundFontData: ArrayBuffer | null = null - get loadedSoundFontData(): ArrayBuffer | null { - return this._loadedSoundFontData - } - - isLoading: boolean = true - - constructor(context: AudioContext, soundFontURL: string) { - this.context = context - this.soundFontURL = soundFontURL - - makeObservable(this, { - isLoading: observable, - }) - - this.setup().finally(() => { - this.isLoading = false - }) - } - - private async setup() { - const url = new URL("@ryohey/wavelet/dist/processor.js", import.meta.url) - await this.context.audioWorklet.addModule(url) - - this.synth = new AudioWorkletNode(this.context, "synth-processor", { - numberOfInputs: 0, - outputChannelCount: [2], - } as any) - this.synth.connect(this.context.destination) - - await this.loadSoundFont() - } - - private async loadSoundFont() { - const data = await (await fetch(this.soundFontURL)).arrayBuffer() - const samples = getSamplesFromSoundFont(new Uint8Array(data), this.context) - this._loadedSoundFontData = data - - for (const sample of samples) { - this.postSynthMessage( - sample, - [sample.sample.buffer], // transfer instead of copy - ) - } - } - - private postSynthMessage(e: SynthEvent, transfer?: Transferable[]) { - this.synth?.port.postMessage(e, transfer ?? 
[]) - } - - sendEvent(event: SendableEvent, delayTime: number = 0) { - this.postSynthMessage({ - type: "midi", - midi: event, - delayTime: delayTime * this.context.sampleRate, - }) - } - - activate() { - this.context.resume() - } -} diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/util/slio.py b/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/util/slio.py deleted file mode 100644 index 72c1f0f7b82cdc931d381feef64fe15815ba657e..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/util/slio.py +++ /dev/null @@ -1,177 +0,0 @@ -# ========================================================== -# Modified from mmcv -# ========================================================== - -import json -import pickle -from abc import ABCMeta, abstractmethod -from pathlib import Path - -import yaml - -try: - from yaml import CLoader as Loader, CDumper as Dumper -except ImportError: - from yaml import Loader, Dumper - - -# =========================== -# Rigister handler -# =========================== - - -class BaseFileHandler(metaclass=ABCMeta): - @abstractmethod - def load_from_fileobj(self, file, **kwargs): - pass - - @abstractmethod - def dump_to_fileobj(self, obj, file, **kwargs): - pass - - @abstractmethod - def dump_to_str(self, obj, **kwargs): - pass - - def load_from_path(self, filepath, mode="r", **kwargs): - with open(filepath, mode) as f: - return self.load_from_fileobj(f, **kwargs) - - def dump_to_path(self, obj, filepath, mode="w", **kwargs): - with open(filepath, mode) as f: - self.dump_to_fileobj(obj, f, **kwargs) - - -class JsonHandler(BaseFileHandler): - def load_from_fileobj(self, file): - return json.load(file) - - def dump_to_fileobj(self, obj, file, **kwargs): - json.dump(obj, file, **kwargs) - - def dump_to_str(self, obj, **kwargs): - return json.dumps(obj, **kwargs) - - -class PickleHandler(BaseFileHandler): - def load_from_fileobj(self, file, **kwargs): - return pickle.load(file, **kwargs) - - def load_from_path(self, filepath, **kwargs): - return super(PickleHandler, self).load_from_path(filepath, mode="rb", **kwargs) - - def dump_to_str(self, obj, **kwargs): - kwargs.setdefault("protocol", 2) - return pickle.dumps(obj, **kwargs) - - def dump_to_fileobj(self, obj, file, **kwargs): - kwargs.setdefault("protocol", 2) - pickle.dump(obj, file, **kwargs) - - def dump_to_path(self, obj, filepath, **kwargs): - super(PickleHandler, self).dump_to_path(obj, filepath, mode="wb", **kwargs) - - -class YamlHandler(BaseFileHandler): - def load_from_fileobj(self, file, **kwargs): - kwargs.setdefault("Loader", Loader) - return yaml.load(file, **kwargs) - - def dump_to_fileobj(self, obj, file, **kwargs): - kwargs.setdefault("Dumper", Dumper) - yaml.dump(obj, file, **kwargs) - - def dump_to_str(self, obj, **kwargs): - kwargs.setdefault("Dumper", Dumper) - return yaml.dump(obj, **kwargs) - - -file_handlers = { - "json": JsonHandler(), - "yaml": YamlHandler(), - "yml": YamlHandler(), - "pickle": PickleHandler(), - "pkl": PickleHandler(), -} - -# =========================== -# load and dump -# =========================== - - -def is_str(x): - """Whether the input is an string instance. - - Note: This method is deprecated since python 2 is no longer supported. - """ - return isinstance(x, str) - - -def slload(file, file_format=None, **kwargs): - """Load data from json/yaml/pickle files. - - This method provides a unified api for loading data from serialized files. 
-
-    Args:
-        file (str or :obj:`Path` or file-like object): Filename or a file-like
-            object.
-        file_format (str, optional): If not specified, the file format will be
-            inferred from the file extension, otherwise use the specified one.
-            Currently supported formats include "json", "yaml/yml" and
-            "pickle/pkl".
-
-    Returns:
-        The content from the file.
-    """
-    if isinstance(file, Path):
-        file = str(file)
-    if file_format is None and is_str(file):
-        file_format = file.split(".")[-1]
-    if file_format not in file_handlers:
-        raise TypeError(f"Unsupported format: {file_format}")
-
-    handler = file_handlers[file_format]
-    if is_str(file):
-        obj = handler.load_from_path(file, **kwargs)
-    elif hasattr(file, "read"):
-        obj = handler.load_from_fileobj(file, **kwargs)
-    else:
-        raise TypeError('"file" must be a filepath str or a file-object')
-    return obj
-
-
-def sldump(obj, file=None, file_format=None, **kwargs):
-    """Dump data to json/yaml/pickle strings or files.
-
-    This method provides a unified api for dumping data as strings or to files,
-    and also supports custom arguments for each file format.
-
-    Args:
-        obj (any): The python object to be dumped.
-        file (str or :obj:`Path` or file-like object, optional): If not
-            specified, then the object is dumped to a str, otherwise to a file
-            specified by the filename or file-like object.
-        file_format (str, optional): Same as :func:`slload`.
-
-    Returns:
-        The dumped string when `file` is None, `None` otherwise.
-    """
-    if isinstance(file, Path):
-        file = str(file)
-    if file_format is None:
-        if is_str(file):
-            file_format = file.split(".")[-1]
-        elif file is None:
-            raise ValueError("file_format must be specified since file is None")
-    if file_format not in file_handlers:
-        raise TypeError(f"Unsupported format: {file_format}")
-
-    handler = file_handlers[file_format]
-    if file is None:
-        return handler.dump_to_str(obj, **kwargs)
-    elif is_str(file):
-        handler.dump_to_path(obj, file, **kwargs)
-    elif hasattr(file, "write"):
-        handler.dump_to_fileobj(obj, file, **kwargs)
-    else:
-        raise TypeError('"file" must be a filename str or a file-object')
diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/distilbert/modeling_tf_distilbert.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/distilbert/modeling_tf_distilbert.py
deleted file mode 100644
index 6b0e1b0f3febcf0b53eb53e4cf9ed6ef7f4a1d13..0000000000000000000000000000000000000000
--- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/distilbert/modeling_tf_distilbert.py
+++ /dev/null
@@ -1,993 +0,0 @@
-# coding=utf-8
-# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
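Stepping back to the `slio.py` helpers that ended just above this DistilBERT header: a short usage sketch (the file name is hypothetical), showing format inference from the extension and dumping to a string:

```python
from groundingdino.util.slio import sldump, slload

cfg = {"box_threshold": 0.35, "classes": ["person", "dog"]}
sldump(cfg, "cfg.yaml")                 # format inferred from ".yaml"
assert slload("cfg.yaml") == cfg
text = sldump(cfg, file_format="json")  # no file -> returns a JSON string
print(text)
```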
-""" - TF 2.0 DistilBERT model -""" - - -from __future__ import annotations - -import warnings -from typing import Optional, Tuple, Union - -import numpy as np -import tensorflow as tf - -from ...activations_tf import get_tf_activation -from ...modeling_tf_outputs import ( - TFBaseModelOutput, - TFMaskedLMOutput, - TFMultipleChoiceModelOutput, - TFQuestionAnsweringModelOutput, - TFSequenceClassifierOutput, - TFTokenClassifierOutput, -) -from ...modeling_tf_utils import ( - TFMaskedLanguageModelingLoss, - TFModelInputType, - TFMultipleChoiceLoss, - TFPreTrainedModel, - TFQuestionAnsweringLoss, - TFSequenceClassificationLoss, - TFTokenClassificationLoss, - get_initializer, - keras_serializable, - unpack_inputs, -) -from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax -from ...utils import ( - add_code_sample_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, -) -from .configuration_distilbert import DistilBertConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "distilbert-base-uncased" -_CONFIG_FOR_DOC = "DistilBertConfig" - -TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "distilbert-base-uncased", - "distilbert-base-uncased-distilled-squad", - "distilbert-base-cased", - "distilbert-base-cased-distilled-squad", - "distilbert-base-multilingual-cased", - "distilbert-base-uncased-finetuned-sst-2-english", - # See all DistilBERT models at https://huggingface.co/models?filter=distilbert -] - - -class TFEmbeddings(tf.keras.layers.Layer): - """Construct the embeddings from word, position and token_type embeddings.""" - - def __init__(self, config, **kwargs): - super().__init__(**kwargs) - self.config = config - self.dim = config.dim - self.initializer_range = config.initializer_range - self.max_position_embeddings = config.max_position_embeddings - self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm") - self.dropout = tf.keras.layers.Dropout(rate=config.dropout) - - def build(self, input_shape: tf.TensorShape): - with tf.name_scope("word_embeddings"): - self.weight = self.add_weight( - name="weight", - shape=[self.config.vocab_size, self.dim], - initializer=get_initializer(initializer_range=self.initializer_range), - ) - - with tf.name_scope("position_embeddings"): - self.position_embeddings = self.add_weight( - name="embeddings", - shape=[self.max_position_embeddings, self.dim], - initializer=get_initializer(initializer_range=self.initializer_range), - ) - - super().build(input_shape) - - def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False): - """ - Applies embedding based on inputs tensor. - - Returns: - final_embeddings (`tf.Tensor`): output embedding tensor. 
- """ - assert not (input_ids is None and inputs_embeds is None) - - if input_ids is not None: - check_embeddings_within_bounds(input_ids, self.config.vocab_size) - inputs_embeds = tf.gather(params=self.weight, indices=input_ids) - - input_shape = shape_list(inputs_embeds)[:-1] - - if position_ids is None: - position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) - - position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) - final_embeddings = inputs_embeds + position_embeds - final_embeddings = self.LayerNorm(inputs=final_embeddings) - final_embeddings = self.dropout(inputs=final_embeddings, training=training) - - return final_embeddings - - -class TFMultiHeadSelfAttention(tf.keras.layers.Layer): - def __init__(self, config, **kwargs): - super().__init__(**kwargs) - - self.n_heads = config.n_heads - self.dim = config.dim - self.dropout = tf.keras.layers.Dropout(config.attention_dropout) - self.output_attentions = config.output_attentions - - assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}" - - self.q_lin = tf.keras.layers.Dense( - config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin" - ) - self.k_lin = tf.keras.layers.Dense( - config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin" - ) - self.v_lin = tf.keras.layers.Dense( - config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin" - ) - self.out_lin = tf.keras.layers.Dense( - config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin" - ) - - self.pruned_heads = set() - - def prune_heads(self, heads): - raise NotImplementedError - - def call(self, query, key, value, mask, head_mask, output_attentions, training=False): - """ - Parameters: - query: tf.Tensor(bs, seq_length, dim) - key: tf.Tensor(bs, seq_length, dim) - value: tf.Tensor(bs, seq_length, dim) - mask: tf.Tensor(bs, seq_length) - - Returns: - weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights context: tf.Tensor(bs, - seq_length, dim) Contextualized layer. 
Optional: only if `output_attentions=True` - """ - bs, q_length, dim = shape_list(query) - k_length = shape_list(key)[1] - # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured' - # assert key.size() == value.size() - dim_per_head = int(self.dim / self.n_heads) - dim_per_head = tf.cast(dim_per_head, dtype=tf.int32) - mask_reshape = [bs, 1, 1, k_length] - - def shape(x): - """separate heads""" - return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3)) - - def unshape(x): - """group heads""" - return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head)) - - q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head) - k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head) - v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head) - q = tf.cast(q, dtype=tf.float32) - q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32))) - k = tf.cast(k, dtype=q.dtype) - scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length) - mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen) - # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length) - - mask = tf.cast(mask, dtype=scores.dtype) - scores = scores - 1e30 * (1.0 - mask) - weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen) - weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen) - - # Mask heads if we want to - if head_mask is not None: - weights = weights * head_mask - - context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head) - context = unshape(context) # (bs, q_length, dim) - context = self.out_lin(context) # (bs, q_length, dim) - - if output_attentions: - return (context, weights) - else: - return (context,) - - -class TFFFN(tf.keras.layers.Layer): - def __init__(self, config, **kwargs): - super().__init__(**kwargs) - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.lin1 = tf.keras.layers.Dense( - config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1" - ) - self.lin2 = tf.keras.layers.Dense( - config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2" - ) - self.activation = get_tf_activation(config.activation) - - def call(self, input, training=False): - x = self.lin1(input) - x = self.activation(x) - x = self.lin2(x) - x = self.dropout(x, training=training) - return x - - -class TFTransformerBlock(tf.keras.layers.Layer): - def __init__(self, config, **kwargs): - super().__init__(**kwargs) - - self.n_heads = config.n_heads - self.dim = config.dim - self.hidden_dim = config.hidden_dim - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.activation = config.activation - self.output_attentions = config.output_attentions - - assert ( - config.dim % config.n_heads == 0 - ), f"Hidden size {config.dim} not divisible by number of heads {config.n_heads}" - - self.attention = TFMultiHeadSelfAttention(config, name="attention") - self.sa_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm") - - self.ffn = TFFFN(config, name="ffn") - self.output_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm") - - def call(self, x, attn_mask, head_mask, output_attentions, training=False): # removed: src_enc=None, src_len=None - """ - Parameters: - x: tf.Tensor(bs, seq_length, dim) - attn_mask: tf.Tensor(bs, seq_length) - - Outputs: sa_weights: tf.Tensor(bs,
n_heads, seq_length, seq_length) The attention weights ffn_output: - tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization. - """ - # Self-Attention - sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training) - if output_attentions: - sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length) - else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples - # assert type(sa_output) == tuple - sa_output = sa_output[0] - sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim) - - # Feed Forward Network - ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim) - ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim) - - output = (ffn_output,) - if output_attentions: - output = (sa_weights,) + output - return output - - -class TFTransformer(tf.keras.layers.Layer): - def __init__(self, config, **kwargs): - super().__init__(**kwargs) - self.n_layers = config.n_layers - self.output_hidden_states = config.output_hidden_states - self.output_attentions = config.output_attentions - - self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)] - - def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False): - # docstyle-ignore - """ - Parameters: - x: tf.Tensor(bs, seq_length, dim) Input sequence embedded. - attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence. - - Returns: - hidden_state: tf.Tensor(bs, seq_length, dim) - Sequence of hidden states in the last (top) layer - all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)] - Tuple of length n_layers with the hidden states from each layer. 
- Optional: only if output_hidden_states=True - all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)] - Tuple of length n_layers with the attention weights from each layer - Optional: only if output_attentions=True - """ - all_hidden_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - hidden_state = x - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_state,) - - layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training) - hidden_state = layer_outputs[-1] - - if output_attentions: - assert len(layer_outputs) == 2 - attentions = layer_outputs[0] - all_attentions = all_attentions + (attentions,) - else: - assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1" - - # Add last layer - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_state,) - - if not return_dict: - return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None) - return TFBaseModelOutput( - last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions - ) - - -@keras_serializable -class TFDistilBertMainLayer(tf.keras.layers.Layer): - config_class = DistilBertConfig - - def __init__(self, config, **kwargs): - super().__init__(**kwargs) - - self.config = config - self.num_hidden_layers = config.num_hidden_layers - self.output_attentions = config.output_attentions - self.output_hidden_states = config.output_hidden_states - self.return_dict = config.use_return_dict - - self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings - self.transformer = TFTransformer(config, name="transformer") # Encoder - - def get_input_embeddings(self): - return self.embeddings - - def set_input_embeddings(self, value): - self.embeddings.weight = value - self.embeddings.vocab_size = value.shape[0] - - def _prune_heads(self, heads_to_prune): - raise NotImplementedError - - @unpack_inputs - def call( - self, - input_ids=None, - attention_mask=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, - ): - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = shape_list(input_ids) - elif inputs_embeds is not None: - input_shape = shape_list(inputs_embeds)[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if attention_mask is None: - attention_mask = tf.ones(input_shape) # (bs, seq_length) - - attention_mask = tf.cast(attention_mask, dtype=tf.float32) - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - if head_mask is not None: - raise NotImplementedError - else: - head_mask = [None] * self.num_hidden_layers - - embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim) - tfmr_output = self.transformer( - embedding_output, - attention_mask, - head_mask, - output_attentions, - output_hidden_states, - return_dict, - training=training, - ) - - return tfmr_output # last-layer 
hidden-state, (all hidden_states), (all attentions) - - -# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # -class TFDistilBertPreTrainedModel(TFPreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = DistilBertConfig - base_model_prefix = "distilbert" - - -DISTILBERT_START_DOCSTRING = r""" - - This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it - as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and - behavior. - - - - TensorFlow models and layers in `transformers` accept two formats as input: - - - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional argument. - - The reason the second format is supported is that Keras methods prefer this format when passing inputs to models - and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just - pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second - format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with - the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first - positional argument: - - - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: - `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - - a dictionary with one or several input Tensors associated to the input names given in the docstring: - `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` - - Note that when creating models and layers with - [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry - about any of this, as you can just pass inputs like you would to any other Python function! - - - - Parameters: - config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -DISTILBERT_INPUTS_DOCSTRING = r""" - Args: - input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and - [`PreTrainedTokenizer.encode`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**.
- - [What are attention masks?](../glossary#attention-mask) - head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert `input_ids` indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the - config will be used instead. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. This argument can be used only in eager mode, in graph mode the value in the config will be - used instead. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in - eager mode, in graph mode the value will always be set to True. - training (`bool`, *optional*, defaults to `False`): - Whether or not to use the model in training mode (some modules like dropout modules have different - behaviors between training and evaluation). -""" - - -@add_start_docstrings( - "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.", - DISTILBERT_START_DOCSTRING, -) -class TFDistilBertModel(TFDistilBertPreTrainedModel): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Embeddings - - @unpack_inputs - @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFBaseModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: Optional[bool] = False, - ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: - outputs = self.distilbert( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - return outputs - - -class TFDistilBertLMHead(tf.keras.layers.Layer): - def __init__(self, config, input_embeddings, **kwargs): - super().__init__(**kwargs) - - self.config = config - self.dim = config.dim - - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. 
- self.input_embeddings = input_embeddings - - def build(self, input_shape): - self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") - - super().build(input_shape) - - def get_output_embeddings(self): - return self.input_embeddings - - def set_output_embeddings(self, value): - self.input_embeddings.weight = value - self.input_embeddings.vocab_size = shape_list(value)[0] - - def get_bias(self): - return {"bias": self.bias} - - def set_bias(self, value): - self.bias = value["bias"] - self.config.vocab_size = shape_list(value["bias"])[0] - - def call(self, hidden_states): - seq_length = shape_list(tensor=hidden_states)[1] - hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim]) - hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) - hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) - hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) - - return hidden_states - - -@add_start_docstrings( - """DistilBert Model with a `masked language modeling` head on top.""", - DISTILBERT_START_DOCSTRING, -) -class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.config = config - - self.distilbert = TFDistilBertMainLayer(config, name="distilbert") - self.vocab_transform = tf.keras.layers.Dense( - config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform" - ) - self.act = get_tf_activation(config.activation) - self.vocab_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm") - self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector") - - def get_lm_head(self): - return self.vocab_projector - - def get_prefix_bias_name(self): - warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) - return self.name + "/" + self.vocab_projector.name - - @unpack_inputs - @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFMaskedLMOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., - config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the - loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` - """ - distilbert_output = self.distilbert( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - hidden_states = distilbert_output[0] # (bs, seq_length, dim) - prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim) - prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim) - prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim) - prediction_logits = self.vocab_projector(prediction_logits) - - loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits) - - if not return_dict: - output = (prediction_logits,) + distilbert_output[1:] - return ((loss,) + output) if loss is not None else output - - return TFMaskedLMOutput( - loss=loss, - logits=prediction_logits, - hidden_states=distilbert_output.hidden_states, - attentions=distilbert_output.attentions, - ) - - -@add_start_docstrings( - """ - DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the - pooled output) e.g. for GLUE tasks. - """, - DISTILBERT_START_DOCSTRING, -) -class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.num_labels = config.num_labels - - self.distilbert = TFDistilBertMainLayer(config, name="distilbert") - self.pre_classifier = tf.keras.layers.Dense( - config.dim, - kernel_initializer=get_initializer(config.initializer_range), - activation="relu", - name="pre_classifier", - ) - self.classifier = tf.keras.layers.Dense( - config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" - ) - self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout) - - @unpack_inputs - @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFSequenceClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): - Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If - `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
- """ - distilbert_output = self.distilbert( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - hidden_state = distilbert_output[0] # (bs, seq_len, dim) - pooled_output = hidden_state[:, 0] # (bs, dim) - pooled_output = self.pre_classifier(pooled_output) # (bs, dim) - pooled_output = self.dropout(pooled_output, training=training) # (bs, dim) - logits = self.classifier(pooled_output) # (bs, dim) - - loss = None if labels is None else self.hf_compute_loss(labels, logits) - - if not return_dict: - output = (logits,) + distilbert_output[1:] - return ((loss,) + output) if loss is not None else output - - return TFSequenceClassifierOutput( - loss=loss, - logits=logits, - hidden_states=distilbert_output.hidden_states, - attentions=distilbert_output.attentions, - ) - - -@add_start_docstrings( - """ - DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. - for Named-Entity-Recognition (NER) tasks. - """, - DISTILBERT_START_DOCSTRING, -) -class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.num_labels = config.num_labels - - self.distilbert = TFDistilBertMainLayer(config, name="distilbert") - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.classifier = tf.keras.layers.Dense( - config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" - ) - - @unpack_inputs - @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFTokenClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
- """ - outputs = self.distilbert( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - sequence_output = outputs[0] - sequence_output = self.dropout(sequence_output, training=training) - logits = self.classifier(sequence_output) - loss = None if labels is None else self.hf_compute_loss(labels, logits) - - if not return_dict: - output = (logits,) + outputs[1:] - return ((loss,) + output) if loss is not None else output - - return TFTokenClassifierOutput( - loss=loss, - logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -@add_start_docstrings( - """ - DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and - a softmax) e.g. for RocStories/SWAG tasks. - """, - DISTILBERT_START_DOCSTRING, -) -class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.distilbert = TFDistilBertMainLayer(config, name="distilbert") - self.dropout = tf.keras.layers.Dropout(config.seq_classif_dropout) - self.pre_classifier = tf.keras.layers.Dense( - config.dim, - kernel_initializer=get_initializer(config.initializer_range), - activation="relu", - name="pre_classifier", - ) - self.classifier = tf.keras.layers.Dense( - 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" - ) - - @unpack_inputs - @add_start_docstrings_to_model_forward( - DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") - ) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFMultipleChoiceModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): - Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` - where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) - """ - if input_ids is not None: - num_choices = shape_list(input_ids)[1] - seq_length = shape_list(input_ids)[2] - else: - num_choices = shape_list(inputs_embeds)[1] - seq_length = shape_list(inputs_embeds)[2] - - flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None - flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None - flat_inputs_embeds = ( - tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) - if inputs_embeds is not None - else None - ) - distilbert_output = self.distilbert( - flat_input_ids, - flat_attention_mask, - head_mask, - flat_inputs_embeds, - output_attentions, - output_hidden_states, - return_dict=return_dict, - training=training, - ) - hidden_state = distilbert_output[0] # (bs, seq_len, dim) - pooled_output = hidden_state[:, 0] # (bs, dim) - pooled_output = self.pre_classifier(pooled_output) # (bs, dim) - pooled_output = self.dropout(pooled_output, training=training) # (bs, dim) - logits = self.classifier(pooled_output) - reshaped_logits = tf.reshape(logits, (-1, num_choices)) - - loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) - - if not return_dict: - output = (reshaped_logits,) + distilbert_output[1:] - return ((loss,) + output) if loss is not None else output - - return TFMultipleChoiceModelOutput( - loss=loss, - logits=reshaped_logits, - hidden_states=distilbert_output.hidden_states, - attentions=distilbert_output.attentions, - ) - - -@add_start_docstrings( - """ - DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a - linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). - """, - DISTILBERT_START_DOCSTRING, -) -class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss): - def __init__(self, config, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.distilbert = TFDistilBertMainLayer(config, name="distilbert") - self.qa_outputs = tf.keras.layers.Dense( - config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" - ) - assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2" - self.dropout = tf.keras.layers.Dropout(config.qa_dropout) - - @unpack_inputs - @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFQuestionAnsweringModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - head_mask: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - start_positions: np.ndarray | tf.Tensor | None = None, - end_positions: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: - r""" - start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the start of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). 
Positions outside of the sequence - are not taken into account for computing the loss. - end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the end of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence - are not taken into account for computing the loss. - """ - distilbert_output = self.distilbert( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - hidden_states = distilbert_output[0] # (bs, max_query_len, dim) - hidden_states = self.dropout(hidden_states, training=training) # (bs, max_query_len, dim) - logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2) - start_logits, end_logits = tf.split(logits, 2, axis=-1) - start_logits = tf.squeeze(start_logits, axis=-1) - end_logits = tf.squeeze(end_logits, axis=-1) - - loss = None - if start_positions is not None and end_positions is not None: - labels = {"start_position": start_positions} - labels["end_position"] = end_positions - loss = self.hf_compute_loss(labels, (start_logits, end_logits)) - - if not return_dict: - output = (start_logits, end_logits) + distilbert_output[1:] - return ((loss,) + output) if loss is not None else output - - return TFQuestionAnsweringModelOutput( - loss=loss, - start_logits=start_logits, - end_logits=end_logits, - hidden_states=distilbert_output.hidden_states, - attentions=distilbert_output.attentions, - ) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/__init__.py deleted file mode 100644 index da53011b87b318bbef0d48557284d290f92a9fe4..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/dpt/__init__.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
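For orientation, this is how the question-answering head above is typically driven end to end; a minimal sketch assuming the standard `transformers` package and the SQuAD-distilled checkpoint from the archive list earlier (question and context strings are hypothetical):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFDistilBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased-distilled-squad")
model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert-base-cased-distilled-squad")

question = "What does the QA head emit?"
context = "The QA head emits start and end logits for every input token."
inputs = tokenizer(question, context, return_tensors="tf")

outputs = model(**inputs)
# Greedy span decoding: argmax over the start and end logits.
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
```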
-from typing import TYPE_CHECKING - -from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available -from ...utils import OptionalDependencyNotAvailable - - -_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"] - _import_structure["image_processing_dpt"] = ["DPTImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_dpt"] = [ - "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", - "DPTForDepthEstimation", - "DPTForSemanticSegmentation", - "DPTModel", - "DPTPreTrainedModel", - ] - - -if TYPE_CHECKING: - from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_dpt import DPTFeatureExtractor - from .image_processing_dpt import DPTImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_dpt import ( - DPT_PRETRAINED_MODEL_ARCHIVE_LIST, - DPTForDepthEstimation, - DPTForSemanticSegmentation, - DPTModel, - DPTPreTrainedModel, - ) - - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/falcon/configuration_falcon.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/falcon/configuration_falcon.py deleted file mode 100644 index fce21b146cf97f191016cdf73d1029be5f7bea91..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/falcon/configuration_falcon.py +++ /dev/null @@ -1,191 +0,0 @@ -# coding=utf-8 -# Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Falcon configuration""" -from ...configuration_utils import PretrainedConfig -from ...utils import logging - - -logger = logging.get_logger(__name__) - -FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json", - "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json", -} - - -class FalconConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon - model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the - defaults will yield a similar configuration to that of the - [tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - - Args: - vocab_size (`int`, *optional*, defaults to 65024): - Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the - `input_ids` passed when calling [`FalconModel`] - hidden_size (`int`, *optional*, defaults to 4544): - Dimension of the hidden representations. - num_hidden_layers (`int`, *optional*, defaults to 32): - Number of hidden layers in the Transformer decoder. - num_attention_heads (`int`, *optional*, defaults to 71): - Number of attention heads for each attention layer in the Transformer decoder. - layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): - The epsilon used by the layer normalization layers. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - use_cache (`bool`, *optional*, defaults to `True`): - Whether the model should return the last key/values attentions (not used by all models). Only relevant if - `config.is_decoder=True`. - hidden_dropout (`float`, *optional*, defaults to 0.0): - The dropout probability for MLP layers. - attention_dropout (`float`, *optional*, defaults to 0.0): - The dropout probability for attention layers. - num_kv_heads (`int`, *optional*): - Number of key-value heads to use per attention layer. If unset, defaults to the same value as - `num_attention_heads`. - alibi (`bool`, *optional*, defaults to `False`): - Whether to use ALiBi positional biases during self-attention. - new_decoder_architecture (`bool`, *optional*, defaults to `False`): - Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn` - arguments are ignored, as the new decoder always uses parallel attention. - multi_query (`bool`, *optional*, defaults to `True`): - Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`. - parallel_attn (`bool`, *optional*, defaults to `True`): - Whether to compute attention in parallel with the feedforward layer. If False, they are consecutive - instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`. - bias (`bool`, *optional*, defaults to `False`): - Whether to use bias on Linear layers. - max_position_embeddings (`int`, *optional*, defaults to 2048): - The maximum sequence length that this model might ever be used with, when `alibi` is `False`. Pretrained - Falcon models with RoPE support up to 2048 tokens. - rope_theta (`float`, *optional*, defaults to 10000.0): - The base period of the RoPE embeddings. - rope_scaling (`Dict`, *optional*): - Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling - strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format - is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update - `max_position_embeddings` to the expected new maximum.
See the following thread for more information on how - these scaling strategies behave: - https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an - experimental feature, subject to breaking API changes in future versions. - bos_token_id (`int`, *optional*, defaults to 11): - The id of the "beginning-of-sequence" token. - eos_token_id (`int`, *optional*, defaults to 11): - The id of the "end-of-sequence" token. - - Example: - - ```python - >>> from transformers import FalconModel, FalconConfig - - >>> # Initializing a small (2-layer) Falcon configuration - >>> configuration = FalconConfig(num_hidden_layers=2) - - >>> # Initializing a model from the small configuration - >>> model = FalconModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "falcon" - keys_to_ignore_at_inference = ["past_key_values"] - - def __init__( - self, - vocab_size=65024, - hidden_size=4544, - num_hidden_layers=32, - num_attention_heads=71, - layer_norm_epsilon=1e-5, - initializer_range=0.02, - use_cache=True, - hidden_dropout=0.0, - attention_dropout=0.0, - num_kv_heads=None, - alibi=False, - new_decoder_architecture=False, - multi_query=True, - parallel_attn=True, - bias=False, - max_position_embeddings=2048, - rope_theta=10000.0, - rope_scaling=None, - bos_token_id=11, - eos_token_id=11, - **kwargs, - ): - self.vocab_size = vocab_size - # Backward compatibility with n_embed kwarg - n_embed = kwargs.pop("n_embed", None) - self.hidden_size = hidden_size if n_embed is None else n_embed - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.layer_norm_epsilon = layer_norm_epsilon - self.initializer_range = initializer_range - self.use_cache = use_cache - self.hidden_dropout = hidden_dropout - self.attention_dropout = attention_dropout - - self.bos_token_id = bos_token_id - self.eos_token_id = eos_token_id - self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads - self.alibi = alibi - self.new_decoder_architecture = new_decoder_architecture - self.multi_query = multi_query # Ignored when new_decoder_architecture is True - self.parallel_attn = parallel_attn - self.bias = bias - self.max_position_embeddings = max_position_embeddings - self.rope_theta = rope_theta - self.rope_scaling = rope_scaling - self._rope_scaling_validation() - - super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) - - @property - def head_dim(self): - return self.hidden_size // self.num_attention_heads - - @property - def rotary(self): - return not self.alibi - - def _rope_scaling_validation(self): - """ - Validate the `rope_scaling` configuration.
- """ - if self.rope_scaling is None: - return - - if self.rotary: - raise ValueError("`rope_scaling` is not supported when `alibi` is `True`.") - - if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: - raise ValueError( - "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, " - f"got {self.rope_scaling}" - ) - rope_scaling_type = self.rope_scaling.get("type", None) - rope_scaling_factor = self.rope_scaling.get("factor", None) - if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: - raise ValueError( - f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" - ) - if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: - raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}") diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/bugs.md b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/bugs.md deleted file mode 100644 index d0235c708ab6b0cdadb5865110e9e8c22ca313aa..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/.github/ISSUE_TEMPLATE/bugs.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -name: "🐛 Bugs" -about: Report bugs in detectron2 -title: Please read & provide the following - ---- - -## Instructions To Reproduce the 🐛 Bug: -1. Full runnable code or full changes you made: -``` -If making changes to the project itself, please use output of the following command: -git rev-parse HEAD; git diff - - -``` -2. What exact command you run: -3. __Full logs__ or other relevant observations: -``` - -``` -4. please simplify the steps as much as possible so they do not require additional resources to - run, such as a private dataset. - -## Expected behavior: - -If there are no obvious error in "full logs" provided above, -please tell us the expected behavior. - -## Environment: - -Provide your environment information using the following command: -``` -wget -nc -q https://github.com/facebookresearch/detectron2/raw/main/detectron2/utils/collect_env.py && python collect_env.py -``` - -If your issue looks like an installation issue / environment issue, -please first try to solve it yourself with the instructions in -https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/dlafpn.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/dlafpn.py deleted file mode 100644 index 2a33c66bf3d5b97bf882eaf0b80de012151a62b4..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/dlafpn.py +++ /dev/null @@ -1,493 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# this file is from https://github.com/ucbdrive/dla/blob/master/dla.py. 
- -import math -from os.path import join -import numpy as np - -import torch -from torch import nn -import torch.utils.model_zoo as model_zoo -import torch.nn.functional as F -import fvcore.nn.weight_init as weight_init - -from detectron2.modeling.backbone import FPN -from detectron2.layers import ShapeSpec, ModulatedDeformConv, Conv2d -from detectron2.modeling.backbone.build import BACKBONE_REGISTRY -from detectron2.layers.batch_norm import get_norm -from detectron2.modeling.backbone import Backbone - -WEB_ROOT = 'http://dl.yf.io/dla/models' - - -def get_model_url(data, name, hash): - return join( - 'http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash)) - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -class BasicBlock(nn.Module): - def __init__(self, cfg, inplanes, planes, stride=1, dilation=1): - super(BasicBlock, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, - stride=stride, padding=dilation, - bias=False, dilation=dilation) - self.bn1 = get_norm(cfg.MODEL.DLA.NORM, planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, - stride=1, padding=dilation, - bias=False, dilation=dilation) - self.bn2 = get_norm(cfg.MODEL.DLA.NORM, planes) - self.stride = stride - - def forward(self, x, residual=None): - if residual is None: - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 2 - - def __init__(self, cfg, inplanes, planes, stride=1, dilation=1): - super(Bottleneck, self).__init__() - expansion = Bottleneck.expansion - bottle_planes = planes // expansion - self.conv1 = nn.Conv2d(inplanes, bottle_planes, - kernel_size=1, bias=False) - self.bn1 = get_norm(cfg.MODEL.DLA.NORM, bottle_planes) - self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3, - stride=stride, padding=dilation, - bias=False, dilation=dilation) - self.bn2 = get_norm(cfg.MODEL.DLA.NORM, bottle_planes) - self.conv3 = nn.Conv2d(bottle_planes, planes, - kernel_size=1, bias=False) - self.bn3 = get_norm(cfg.MODEL.DLA.NORM, planes) - self.relu = nn.ReLU(inplace=True) - self.stride = stride - - def forward(self, x, residual=None): - if residual is None: - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - out += residual - out = self.relu(out) - - return out - - -class Root(nn.Module): - def __init__(self, cfg, in_channels, out_channels, kernel_size, residual): - super(Root, self).__init__() - self.conv = nn.Conv2d( - in_channels, out_channels, kernel_size, - stride=1, bias=False, padding=(kernel_size - 1) // 2) - self.bn = get_norm(cfg.MODEL.DLA.NORM, out_channels) - self.relu = nn.ReLU(inplace=True) - self.residual = residual - - def forward(self, *x): - children = x - x = self.conv(torch.cat(x, 1)) - x = self.bn(x) - if self.residual: - x += children[0] - x = self.relu(x) - - return x - - -class Tree(nn.Module): - def __init__(self, cfg, levels, block, in_channels, out_channels, stride=1, - level_root=False, root_dim=0, root_kernel_size=1, - dilation=1, root_residual=False): - super(Tree, self).__init__() - if root_dim == 0: - root_dim = 2 * out_channels - if 
level_root: - root_dim += in_channels - if levels == 1: - self.tree1 = block(cfg, in_channels, out_channels, stride, - dilation=dilation) - self.tree2 = block(cfg, out_channels, out_channels, 1, - dilation=dilation) - else: - self.tree1 = Tree(cfg, levels - 1, block, in_channels, out_channels, - stride, root_dim=0, - root_kernel_size=root_kernel_size, - dilation=dilation, root_residual=root_residual) - self.tree2 = Tree(cfg, levels - 1, block, out_channels, out_channels, - root_dim=root_dim + out_channels, - root_kernel_size=root_kernel_size, - dilation=dilation, root_residual=root_residual) - if levels == 1: - self.root = Root(cfg, root_dim, out_channels, root_kernel_size, - root_residual) - self.level_root = level_root - self.root_dim = root_dim - self.downsample = None - self.project = None - self.levels = levels - if stride > 1: - self.downsample = nn.MaxPool2d(stride, stride=stride) - if in_channels != out_channels: - self.project = nn.Sequential( - nn.Conv2d(in_channels, out_channels, - kernel_size=1, stride=1, bias=False), - get_norm(cfg.MODEL.DLA.NORM, out_channels) - ) - - def forward(self, x, residual=None, children=None): - if self.training and residual is not None: - x = x + residual.sum() * 0.0 - children = [] if children is None else children - bottom = self.downsample(x) if self.downsample else x - residual = self.project(bottom) if self.project else bottom - if self.level_root: - children.append(bottom) - x1 = self.tree1(x, residual) - if self.levels == 1: - x2 = self.tree2(x1) - x = self.root(x2, x1, *children) - else: - children.append(x1) - x = self.tree2(x1, children=children) - return x - - -class DLA(Backbone): - def __init__(self, cfg, levels, channels, block=BasicBlock, residual_root=False): - super(DLA, self).__init__() - self.cfg = cfg - self.channels = channels - - self._out_features = ["dla{}".format(i) for i in range(6)] - self._out_feature_channels = {k: channels[i] for i, k in enumerate(self._out_features)} - self._out_feature_strides = {k: 2 ** i for i, k in enumerate(self._out_features)} - - self.base_layer = nn.Sequential( - nn.Conv2d(3, channels[0], kernel_size=7, stride=1, - padding=3, bias=False), - get_norm(cfg.MODEL.DLA.NORM, channels[0]), - nn.ReLU(inplace=True)) - self.level0 = self._make_conv_level( - channels[0], channels[0], levels[0]) - self.level1 = self._make_conv_level( - channels[0], channels[1], levels[1], stride=2) - self.level2 = Tree(cfg, levels[2], block, channels[1], channels[2], 2, - level_root=False, - root_residual=residual_root) - self.level3 = Tree(cfg, levels[3], block, channels[2], channels[3], 2, - level_root=True, root_residual=residual_root) - self.level4 = Tree(cfg, levels[4], block, channels[3], channels[4], 2, - level_root=True, root_residual=residual_root) - self.level5 = Tree(cfg, levels[5], block, channels[4], channels[5], 2, - level_root=True, root_residual=residual_root) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. 
/ n)) - - self.load_pretrained_model( - data='imagenet', name='dla34', hash='ba72cf86') - - def load_pretrained_model(self, data, name, hash): - model_url = get_model_url(data, name, hash) - model_weights = model_zoo.load_url(model_url) - del model_weights['fc.weight'] - del model_weights['fc.bias'] - print('Loading pretrained DLA!') - self.load_state_dict(model_weights, strict=True) - - def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): - modules = [] - for i in range(convs): - modules.extend([ - nn.Conv2d(inplanes, planes, kernel_size=3, - stride=stride if i == 0 else 1, - padding=dilation, bias=False, dilation=dilation), - get_norm(self.cfg.MODEL.DLA.NORM, planes), - nn.ReLU(inplace=True)]) - inplanes = planes - return nn.Sequential(*modules) - - def forward(self, x): - y = {} - x = self.base_layer(x) - for i in range(6): - name = 'level{}'.format(i) - x = getattr(self, name)(x) - y['dla{}'.format(i)] = x - return y - - -def fill_up_weights(up): - w = up.weight.data - f = math.ceil(w.size(2) / 2) - c = (2 * f - 1 - f % 2) / (2. * f) - for i in range(w.size(2)): - for j in range(w.size(3)): - w[0, 0, i, j] = \ - (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c)) - for c in range(1, w.size(0)): - w[c, 0, :, :] = w[0, 0, :, :] - - -class Conv(nn.Module): - def __init__(self, chi, cho, norm): - super(Conv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(chi, cho, kernel_size=1, stride=1, bias=False), - get_norm(norm, cho), - nn.ReLU(inplace=True)) - - def forward(self, x): - return self.conv(x) - - -class DeformConv(nn.Module): - def __init__(self, chi, cho, norm): - super(DeformConv, self).__init__() - self.actf = nn.Sequential( - get_norm(norm, cho), - nn.ReLU(inplace=True) - ) - self.offset = Conv2d( - chi, 27, kernel_size=3, stride=1, - padding=1, dilation=1) - self.conv = ModulatedDeformConv( - chi, cho, kernel_size=3, stride=1, padding=1, - dilation=1, deformable_groups=1) - nn.init.constant_(self.offset.weight, 0) - nn.init.constant_(self.offset.bias, 0) - - def forward(self, x): - offset_mask = self.offset(x) - offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) - offset = torch.cat((offset_x, offset_y), dim=1) - mask = mask.sigmoid() - x = self.conv(x, offset, mask) - x = self.actf(x) - return x - - -class IDAUp(nn.Module): - def __init__(self, o, channels, up_f, norm='FrozenBN', node_type=Conv): - super(IDAUp, self).__init__() - for i in range(1, len(channels)): - c = channels[i] - f = int(up_f[i]) - proj = node_type(c, o, norm) - node = node_type(o, o, norm) - - up = nn.ConvTranspose2d(o, o, f * 2, stride=f, - padding=f // 2, output_padding=0, - groups=o, bias=False) - fill_up_weights(up) - - setattr(self, 'proj_' + str(i), proj) - setattr(self, 'up_' + str(i), up) - setattr(self, 'node_' + str(i), node) - - - def forward(self, layers, startp, endp): - for i in range(startp + 1, endp): - upsample = getattr(self, 'up_' + str(i - startp)) - project = getattr(self, 'proj_' + str(i - startp)) - layers[i] = upsample(project(layers[i])) - node = getattr(self, 'node_' + str(i - startp)) - layers[i] = node(layers[i] + layers[i - 1]) - - -DLAUP_NODE_MAP = { - 'conv': Conv, - 'dcn': DeformConv, -} - -class DLAUP(Backbone): - def __init__(self, bottom_up, in_features, norm, dlaup_node='conv'): - super(DLAUP, self).__init__() - assert isinstance(bottom_up, Backbone) - self.bottom_up = bottom_up - input_shapes = bottom_up.output_shape() - in_strides = [input_shapes[f].stride for f in in_features] - in_channels = [input_shapes[f].channels for f 
in in_features] - in_levels = [int(math.log2(input_shapes[f].stride)) for f in in_features] - self.in_features = in_features - out_features = ['dlaup{}'.format(l) for l in in_levels] - self._out_features = out_features - self._out_feature_channels = { - 'dlaup{}'.format(l): in_channels[i] for i, l in enumerate(in_levels)} - self._out_feature_strides = { - 'dlaup{}'.format(l): 2 ** l for l in in_levels} - - self._size_divisibility = 32 - - node_type = DLAUP_NODE_MAP[dlaup_node] - - self.startp = int(math.log2(in_strides[0])) - self.channels = in_channels - channels = list(in_channels) - scales = np.array([2 ** i for i in range(len(out_features))], dtype=int) - for i in range(len(channels) - 1): - j = -i - 2 - setattr(self, 'ida_{}'.format(i), - IDAUp(channels[j], in_channels[j:], - scales[j:] // scales[j], - norm=norm, - node_type=node_type)) - scales[j + 1:] = scales[j] - in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]] - - @property - def size_divisibility(self): - return self._size_divisibility - - def forward(self, x): - bottom_up_features = self.bottom_up(x) - layers = [bottom_up_features[f] for f in self.in_features] - out = [layers[-1]]  # start from the stride-32 level - for i in range(len(layers) - 1): - ida = getattr(self, 'ida_{}'.format(i)) - ida(layers, len(layers) - i - 2, len(layers)) - out.insert(0, layers[-1]) - ret = {} - for k, v in zip(self._out_features, out): - ret[k] = v - return ret - - -def dla34(cfg, pretrained=None):  # DLA-34 - model = DLA(cfg, [1, 1, 1, 2, 2, 1], - [16, 32, 64, 128, 256, 512], - block=BasicBlock) - return model - - -class LastLevelP6P7(nn.Module): - """ - This module is used in RetinaNet to generate the extra layers, P6 and P7, from - the C5 feature. - """ - - def __init__(self, in_channels, out_channels): - super().__init__() - self.num_levels = 2 - self.in_feature = "dla5" - self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) - self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) - for module in [self.p6, self.p7]: - weight_init.c2_xavier_fill(module) - - def forward(self, c5): - p6 = self.p6(c5) - p7 = self.p7(F.relu(p6)) - return [p6, p7] - - -@BACKBONE_REGISTRY.register() -def build_dla_fpn3_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. - """ - - depth_to_creator = {"dla34": dla34} - bottom_up = depth_to_creator['dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)](cfg) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=None, - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - - return backbone - -@BACKBONE_REGISTRY.register() -def build_dla_fpn5_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
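- - Unlike build_dla_fpn3_backbone above, this builder appends P6/P7 levels via - LastLevelP6P7 (RetinaNet-style). Illustrative use, assuming the usual - MODEL.DLA.* and MODEL.FPN.* keys are set in cfg: - - >>> backbone = build_dla_fpn5_backbone(cfg, ShapeSpec(channels=3)) - >>> feats = backbone(torch.zeros(1, 3, 512, 512))  # dict of FPN feature maps (e.g. p3..p7)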
- """ - - depth_to_creator = {"dla34": dla34} - bottom_up = depth_to_creator['dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)](cfg) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - in_channels_top = bottom_up.output_shape()['dla5'].channels - - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=LastLevelP6P7(in_channels_top, out_channels), - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - - return backbone - - -@BACKBONE_REGISTRY.register() -def build_dlaup_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. - """ - - depth_to_creator = {"dla34": dla34} - bottom_up = depth_to_creator['dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)](cfg) - - backbone = DLAUP( - bottom_up=bottom_up, - in_features=cfg.MODEL.DLA.DLAUP_IN_FEATURES, - norm=cfg.MODEL.DLA.NORM, - dlaup_node=cfg.MODEL.DLA.DLAUP_NODE, - ) - - return backbone diff --git a/spaces/yooch/yooch/llama_func.py b/spaces/yooch/yooch/llama_func.py deleted file mode 100644 index c71027dd4e6f99c0c12626cbbf276f407877be04..0000000000000000000000000000000000000000 --- a/spaces/yooch/yooch/llama_func.py +++ /dev/null @@ -1,192 +0,0 @@ -import os -import logging - -from llama_index import GPTSimpleVectorIndex -from llama_index import download_loader -from llama_index import ( - Document, - LLMPredictor, - PromptHelper, - QuestionAnswerPrompt, - RefinePrompt, -) -from langchain.llms import OpenAI -import colorama - - -from presets import * -from utils import * - - -def get_documents(file_src): - documents = [] - index_name = "" - logging.debug("Loading documents...") - logging.debug(f"file_src: {file_src}") - for file in file_src: - logging.debug(f"file: {file.name}") - index_name += file.name - if os.path.splitext(file.name)[1] == ".pdf": - logging.debug("Loading PDF...") - CJKPDFReader = download_loader("CJKPDFReader") - loader = CJKPDFReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".docx": - logging.debug("Loading DOCX...") - DocxReader = download_loader("DocxReader") - loader = DocxReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".epub": - logging.debug("Loading EPUB...") - EpubReader = download_loader("EpubReader") - loader = EpubReader() - documents += loader.load_data(file=file.name) - else: - logging.debug("Loading text file...") - with open(file.name, "r", encoding="utf-8") as f: - text = add_space(f.read()) - documents += [Document(text)] - index_name = sha1sum(index_name) - return documents, index_name - - -def construct_index( - api_key, - file_src, - max_input_size=4096, - num_outputs=1, - max_chunk_overlap=20, - chunk_size_limit=600, - embedding_limit=None, - separator=" ", - num_children=10, - max_keywords_per_chunk=10, -): - os.environ["OPENAI_API_KEY"] = api_key - chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit - embedding_limit = None if embedding_limit == 0 else embedding_limit - separator = " " if separator == "" else separator - - llm_predictor = LLMPredictor( - llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key) - ) - prompt_helper = PromptHelper( - max_input_size, - num_outputs, - max_chunk_overlap, - embedding_limit, - chunk_size_limit, - separator=separator, - ) - documents, index_name = get_documents(file_src) - if os.path.exists(f"./index/{index_name}.json"): - 
logging.info("找到了缓存的索引文件,加载中……") - return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json") - else: - try: - logging.debug("构建索引中……") - index = GPTSimpleVectorIndex( - documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper - ) - os.makedirs("./index", exist_ok=True) - index.save_to_disk(f"./index/{index_name}.json") - return index - except Exception as e: - print(e) - return None - - -def chat_ai( - api_key, - index, - question, - context, - chatbot, -): - os.environ["OPENAI_API_KEY"] = api_key - - logging.info(f"Question: {question}") - - response, chatbot_display, status_text = ask_ai( - api_key, - index, - question, - replace_today(PROMPT_TEMPLATE), - REFINE_TEMPLATE, - SIM_K, - INDEX_QUERY_TEMPRATURE, - context, - ) - if response is None: - status_text = "查询失败,请换个问法试试" - return context, chatbot - response = response - - context.append({"role": "user", "content": question}) - context.append({"role": "assistant", "content": response}) - chatbot.append((question, chatbot_display)) - - os.environ["OPENAI_API_KEY"] = "" - return context, chatbot, status_text - - -def ask_ai( - api_key, - index, - question, - prompt_tmpl, - refine_tmpl, - sim_k=1, - temprature=0, - prefix_messages=[], -): - os.environ["OPENAI_API_KEY"] = api_key - - logging.debug("Index file found") - logging.debug("Querying index...") - llm_predictor = LLMPredictor( - llm=OpenAI( - temperature=temprature, - model_name="gpt-3.5-turbo-0301", - prefix_messages=prefix_messages, - ) - ) - - response = None # Initialize response variable to avoid UnboundLocalError - qa_prompt = QuestionAnswerPrompt(prompt_tmpl) - rf_prompt = RefinePrompt(refine_tmpl) - response = index.query( - question, - llm_predictor=llm_predictor, - similarity_top_k=sim_k, - text_qa_template=qa_prompt, - refine_template=rf_prompt, - response_mode="compact", - ) - - if response is not None: - logging.info(f"Response: {response}") - ret_text = response.response - nodes = [] - for index, node in enumerate(response.source_nodes): - brief = node.source_text[:25].replace("\n", "") - nodes.append( - f"
- nodes.append( - f"<details><summary>[{idx+1}]\t{brief}...</summary><p>{node.source_text}</p></details>" - ) - new_response = ret_text + "\n----------\n" + "\n\n".join(nodes) - logging.info( - f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}" - ) - os.environ["OPENAI_API_KEY"] = "" - return ret_text, new_response, f"The query consumed {llm_predictor.last_token_usage} tokens" - else: - logging.warning("No response found, returning None") - os.environ["OPENAI_API_KEY"] = "" - return None, None, "" - - -def add_space(text): - punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "} - for cn_punc, en_punc in punctuations.items(): - text = text.replace(cn_punc, en_punc) - return text diff --git a/spaces/yukie/yukie-sovits3/losses.py b/spaces/yukie/yukie-sovits3/losses.py deleted file mode 100644 index 41f9be6980713a46824ae9ec5eb8fd7c515d89c5..0000000000000000000000000000000000000000 --- a/spaces/yukie/yukie-sovits3/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/zhaoys/wfms-kuiwenc/src/app/page.tsx b/spaces/zhaoys/wfms-kuiwenc/src/app/page.tsx deleted file mode 100644 index e389d53d7e517c4cedb2816433ddced41a6e79b6..0000000000000000000000000000000000000000 --- a/spaces/zhaoys/wfms-kuiwenc/src/app/page.tsx +++ /dev/null @@ -1,18 +0,0 @@ -import dynamic from 'next/dynamic' -import './loading.css' - -const DynamicComponentWithNoSSR = dynamic( - () => import('../components/chat'), - { - ssr: false, - loading: () => (
- <div> - {Array.from({length: 3}).map((_, index) => - <div key={index} /> - )} - </div>
- ) - } -) - -export default function IndexPage() { - return <DynamicComponentWithNoSSR /> -} diff --git a/spaces/zhenwusw/JoJoGAN/e4e/utils/common.py b/spaces/zhenwusw/JoJoGAN/e4e/utils/common.py deleted file mode 100644 index b19e18ddcb78b06678fa18e4a76da44fc511b789..0000000000000000000000000000000000000000 --- a/spaces/zhenwusw/JoJoGAN/e4e/utils/common.py +++ /dev/null @@ -1,55 +0,0 @@ -from PIL import Image -import matplotlib.pyplot as plt - - -# Log images -def log_input_image(x, opts): - return tensor2im(x) - - -def tensor2im(var): - # var shape: (3, H, W) - var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy() - var = ((var + 1) / 2) - var[var < 0] = 0 - var[var > 1] = 1 - var = var * 255 - return Image.fromarray(var.astype('uint8')) - - -def vis_faces(log_hooks): - display_count = len(log_hooks) - fig = plt.figure(figsize=(8, 4 * display_count)) - gs = fig.add_gridspec(display_count, 3) - for i in range(display_count): - hooks_dict = log_hooks[i] - fig.add_subplot(gs[i, 0]) - if 'diff_input' in hooks_dict: - vis_faces_with_id(hooks_dict, fig, gs, i) - else: - vis_faces_no_id(hooks_dict, fig, gs, i) - plt.tight_layout() - return fig - - -def vis_faces_with_id(hooks_dict, fig, gs, i): - plt.imshow(hooks_dict['input_face']) - plt.title('Input\nOut Sim={:.2f}'.format(float(hooks_dict['diff_input']))) - fig.add_subplot(gs[i, 1]) - plt.imshow(hooks_dict['target_face']) - plt.title('Target\nIn={:.2f}, Out={:.2f}'.format(float(hooks_dict['diff_views']), - float(hooks_dict['diff_target']))) - fig.add_subplot(gs[i, 2]) - plt.imshow(hooks_dict['output_face']) - plt.title('Output\n Target Sim={:.2f}'.format(float(hooks_dict['diff_target']))) - - -def vis_faces_no_id(hooks_dict, fig, gs, i): - plt.imshow(hooks_dict['input_face'], cmap="gray") - plt.title('Input') - fig.add_subplot(gs[i, 1]) - plt.imshow(hooks_dict['target_face']) - plt.title('Target') - fig.add_subplot(gs[i, 2]) - plt.imshow(hooks_dict['output_face']) - plt.title('Output') diff --git a/spaces/zomehwh/sovits-tannhauser/vdecoder/hifigan/nvSTFT.py b/spaces/zomehwh/sovits-tannhauser/vdecoder/hifigan/nvSTFT.py deleted file mode 100644 index 88597d62a505715091f9ba62d38bf0a85a31b95a..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-tannhauser/vdecoder/hifigan/nvSTFT.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -os.environ["LRU_CACHE_CAPACITY"] = "3" -import torch -import numpy as np -import librosa -from librosa.filters import mel as librosa_mel_fn -import soundfile as sf - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - data, sampling_rate = sf.read(full_path, always_2d=True)  # read the audio with soundfile
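- # soundfile returns a (num_frames, num_channels) array; always_2d=True keeps mono files two-dimensional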
- except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 32000 - else: - raise - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2  # check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer):  # if audio data is type int - max_mag = -np.iinfo(data.dtype).min  # maximum magnitude = min possible value of intXX - else:  # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0)  # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:  # resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 32000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT: - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, center=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - mel_key = str(fmax)+'_'+str(y.device)  # cache key must match the lookup below (keyed by fmax and device) - if mel_key not in self.mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - self.mel_basis[mel_key] = torch.from_numpy(mel).float().to(y.device) - self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - spec = torch.matmul(self.mel_basis[mel_key], spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT()
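A minimal usage sketch for the STFT helper above (illustrative: "example.wav" is a placeholder path, and the shapes assume the default 22050 Hz / 80-mel configuration):

    import torch

    # From a file: __call__ loads the audio, resamples it to target_sr,
    # and returns a log-compressed mel spectrogram of shape (n_mels, frames).
    mel = stft("example.wav")

    # From an in-memory waveform: get_mel expects a batched (B, T) tensor
    # with samples in [-1, 1].
    wav = torch.zeros(1, 22050)    # one second of silence at the target rate
    mel_batch = stft.get_mel(wav)  # shape (1, 80, frames)
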
diff --git a/spaces/zomehwh/sovits-teio/hubert/__init__.py b/spaces/zomehwh/sovits-teio/hubert/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/zwhe99/MAPS-mt/model/alpaca/__init__.py b/spaces/zwhe99/MAPS-mt/model/alpaca/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/zzz666/ChuanhuChatGPT/modules/shared.py b/spaces/zzz666/ChuanhuChatGPT/modules/shared.py deleted file mode 100644 index 4046900a39b2fc7bdd8005844a92dc7d4eb669b6..0000000000000000000000000000000000000000 --- a/spaces/zzz666/ChuanhuChatGPT/modules/shared.py +++ /dev/null @@ -1,24 +0,0 @@ -from modules.presets import API_URL - -class State: - interrupted = False - api_url = API_URL - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_api_url(self, api_url): - self.api_url = api_url - - def reset_api_url(self): - self.api_url = API_URL - return self.api_url - - def reset_all(self): - self.interrupted = False - self.api_url = API_URL - -state = State()
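A short sketch of how the shared state object is typically consumed elsewhere in the app (the consuming code is assumed rather than shown in this diff, and the URL below is a placeholder):

    from modules.shared import state

    state.interrupt()   # a streaming loop can poll state.interrupted and stop early
    state.recover()     # clear the flag before the next request

    state.set_api_url("https://example.com/v1/chat/completions")  # placeholder URL
    state.reset_all()   # back to defaults: interrupted=False, api_url=API_URL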