diff --git a/spaces.zip b/spaces.zip
deleted file mode 100644
index 0d3a13834f7afd0028aeafd016fa14c7f79e58d0..0000000000000000000000000000000000000000
--- a/spaces.zip
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fbb4b253de8e51bfa330e5c7cf31f7841e64ef30c1718d4a05c75e21c8ccf729
-size 671941275
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Film Chokher Bali Aishwarya Rai and Raima Sen in a Riveting Bengali Drama - Download Here.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Film Chokher Bali Aishwarya Rai and Raima Sen in a Riveting Bengali Drama - Download Here.md
deleted file mode 100644
index 8315fbe4bc86cc0fc75869f9d3ea04d945ece2d0..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Film Chokher Bali Aishwarya Rai and Raima Sen in a Riveting Bengali Drama - Download Here.md
+++ /dev/null
@@ -1,122 +0,0 @@
-
-
Film Chokher Bali Full Movie Download: A Review of the Bengali Drama Based on Rabindranath Tagore's Novel
-
If you are looking for a film that explores the complexities of human relationships, emotions and morality, you should watch Chokher Bali, a 2003 Bengali drama film directed by Rituparno Ghosh and based on Rabindranath Tagore's 1903 novel of the same name. The film stars Aishwarya Rai Bachchan, Raima Sen, Prosenjit Chatterjee, Tota Roy Chowdhury and Lily Chakravarty in pivotal roles.
-
The film tells the story of Binodini, a young widow who comes to live with Rajlakshmi and her son Mahendra, who had once rejected her as a prospective bride. Binodini soon develops a friendship with Mahendra's wife Ashalata, but also an attraction to Mahendra himself. This leads to a web of deceit, adultery, jealousy and revenge that affects all their lives.
The film won several awards and accolades, including the National Film Award for Best Feature Film in Bengali, and was screened at various international film festivals. It was also dubbed into Hindi and released worldwide.
-
In this article, we will review Chokher Bali in detail, covering its plot, cast, direction, music, reception and impact. We will also tell you how you can download or stream this film online legally.
-
The novel Chokher Bali
-
Before we dive into the film adaptation, let us first understand the source material that inspired it. Chokher Bali is a novel written by Rabindranath Tagore, one of India's most celebrated writers and Nobel laureates. The novel was first published in 1903 in Bengali as a serial in a magazine called Bangadarshan.
-
The novel is set in late 19th century Bengal, during the British colonial rule. It revolves around four main characters: Binodini, a young widow who is intelligent, beautiful and ambitious; Mahendra, a wealthy landowner who is spoiled and impulsive; Ashalata, his naive and devoted wife who is unaware of his flaws; and Behari, his friend who is noble and upright.
-
The novel explores how these four characters interact with each other under different circumstances, revealing their personalities, desires, conflicts and dilemmas. It also depicts how they are influenced by their social environment, which imposes strict norms on women's roles, marriage customs and widowhood practices.
-
The novel is considered to be one of Tagore's finest works, as it showcases his mastery of storytelling, characterization, dialogue and symbolism. It also deals with themes such as love, friendship, betrayal, passion, loyalty, sacrifice and redemption.
-
The film adaptation
-
How did Rituparno Ghosh translate Tagore's novel into a cinematic masterpiece? Let us look at some of the aspects that make Chokher Bali a remarkable film adaptation.
-
Chokher Bali Aishwarya Rai movie download
-Watch Chokher Bali online free streaming
-Chokher Bali Bengali film based on Rabindranath Tagore novel
-Download Chokher Bali 2003 movie with subtitles
-Chokher Bali Rituparno Ghosh directorial download
-Chokher Bali Raima Sen and Prosenjit Chatterjee movie
-How to download Chokher Bali movie in HD quality
-Chokher Bali movie review and ratings
-Chokher Bali movie plot and summary
-Chokher Bali movie awards and nominations
-Chokher Bali movie songs and music download
-Chokher Bali movie trailer and teaser download
-Chokher Bali movie cast and crew details
-Chokher Bali movie release date and box office collection
-Chokher Bali movie scenes and dialogues download
-Chokher Bali movie behind the scenes and making of videos
-Chokher Bali movie wallpapers and posters download
-Chokher Bali movie trivia and facts
-Chokher Bali movie quotes and memorable lines
-Chokher Bali movie analysis and interpretation
-Chokher Bali movie comparison with novel and other adaptations
-Chokher Bali movie controversies and criticisms
-Chokher Bali movie fan theories and speculations
-Chokher Bali movie fan art and cosplay download
-Chokher Bali movie merchandise and products buy online
-Chokher Bali Aishwarya Rai best performance download
-Watch Chokher Bali with English subtitles online free
-Download Chokher Bali full movie in Hindi dubbed
-Download Chokher Bali full movie in Tamil dubbed
-Download Chokher Bali full movie in Telugu dubbed
-Download Chokher Bali full movie in Malayalam dubbed
-Download Chokher Bali full movie in Kannada dubbed
-Download Chokher Bali full movie in Marathi dubbed
-Download Chokher Bali full movie in Gujarati dubbed
-Download Chokher Bali full movie in Punjabi dubbed
-Download Chokher Bali full movie in Urdu dubbed
-Download Chokher Bali full movie in Nepali dubbed
-Download Chokher Bali full movie in Sinhala dubbed
-Download Chokher Bali full movie in Bhojpuri dubbed
-Download Chokher Bali full movie in Odia dubbed
-Download Chokher Bali full movie in Assamese dubbed
-Download Chokher Bali full movie in Bangladeshi Bengali dubbed
-Download Chokher Bali full movie in 480p 720p 1080p resolution
-Download Chokher Bali full movie from torrent sites
-Download Chokher Bali full movie from legal platforms
-Download Chokher Bali full movie from Internet Archive site[^2^]
-Watch or download Choker Bali: A Passion Play (2003) - IMDb[^3^]
-Watch or download Choker bali (2003) - Rotten Tomatoes
-Watch or download চোখের বালি (2003) - Letterboxd
-Watch or download Sand in the Eye (2003) - MUBI
-
The screenplay
-
Ghosh wrote the screenplay for Chokher Bali, keeping in mind both the essence and the relevance of Tagore's novel. He retained most of the plot and the dialogues from the original text, but also made some changes to suit the medium and the audience of cinema.
-
For instance, he condensed some of the subplots and the minor characters to focus more on the main quartet of Binodini, Mahendra, Ashalata and Behari. He also added some scenes and details that were not present in the novel, such as Binodini's visit to Varanasi, Mahendra's affair with Sudeshna, and Binodini's letter to Behari at the end.
-
Ghosh also updated some aspects of the novel to make them more relatable to contemporary viewers. For example, he changed some names, locations, dates, and costumes to reflect more accurately the historical period and the cultural context of late 19th century Bengal. He also used more colloquial language, humor, and irony to make the dialogues more lively, witty, and realistic.
-
The cinematography
-
Another element that makes Chokher Bali a visually stunning film is its cinematography by Avik Mukhopadhyay. Mukhopadhyay used various techniques such as lighting, framing, color, and movement to capture both the beauty and the emotions of the film.
-
For example, he used natural light and soft colors to create a warm and romantic atmosphere in the scenes between Mahendra and Ashalata. He used dark shadows and contrasting colors to create a tense and dramatic mood in the scenes between Mahendra and Binodini. He used wide shots and long takes to show the grandeur and diversity of Bengal's landscape. He used close-ups and quick cuts to show the expressions and reactions of the characters.
-
The music
-
The music for Chokher Bali was composed by Debojyoti Mishra, who created both the background score and the songs for the film. The music enhanced both the mood and the meaning of the film.
-
-For example, he used classical instruments such as sitar, tabla, flute, and sarangi to create a traditional sound that matched Bengal's culture. He used western instruments such as piano, violin, guitar, and cello to create a modern sound that matched the film's style. He used different genres such as classical, folk, rock, and jazz to create a diverse sound that matched the film's mood. He used lyrics by Tagore himself, as well as by other poets such as Jibanananda Das, Nazrul Islam, and Sukanta Bhattacharya, to create songs that matched the film's theme.
-
-Some of the songs that stand out in Chokher Bali are:
-
Era Shukher Lagi: A fusion of two Tagore songs that express Binodini's longing for Mahendra and her frustration with Ashalata. The song features multiple singers such as Srabani Sen, Chandrabali Rudra Datta, and others.
-
Prothom Dekha: A rock song that plays during the opening credits of the film and sets the tone for the story. The song is sung by Anurag Saikia and has lyrics by Jibanananda Das.
-
Unmadona: A folk song that plays during a boat ride scene where Binodini and Behari share a moment of intimacy. The song is sung by Srikanto Acharya and has lyrics by Nazrul Islam.
-
The cast and performances
-
One of the most crucial aspects of Chokher Bali is its cast and performances. The film features some of the finest actors of Indian cinema, who deliver stellar performances that bring Tagore's characters to life.
-
Aishwarya Rai Bachchan as Binodini
-
Aishwarya Rai Bachchan plays the role of Binodini, the young widow who is intelligent, beautiful and ambitious. She is also manipulative, cunning and restless. She becomes a constant irritant in the lives of her hosts, as she seduces Mahendra, befriends Ashalata, and spurns Behari.
-
Aishwarya Rai Bachchan gives one of her best performances in Chokher Bali, as she portrays the complexity and depth of Binodini's character. She shows her charm, grace and elegance, as well as her vulnerability, anger and pain. She also speaks fluent Bengali, which adds to her authenticity.
-
Raima Sen as Ashalata
-
Raima Sen plays the role of Ashalata, the innocent and naive wife of Mahendra. She is unaware of his flaws and loves him unconditionally. She also develops a friendship with Binodini, whom she calls Chokher Bali (sand in the eye). She becomes a victim of Binodini's schemes, as she loses her husband's love and her own dignity.
-
Raima Sen gives a convincing performance in Chokher Bali, as she portrays the simplicity and sweetness of Ashalata's character. She shows her innocence, loyalty and devotion, as well as her confusion, betrayal and sorrow. She also has a natural chemistry with Aishwarya Rai Bachchan, which makes their friendship believable.
-
Prosenjit Chatterjee as Mahendra
-
Prosenjit Chatterjee plays the role of Mahendra, the wealthy landowner who is spoiled and impulsive. He is also self-obsessed, immature and fickle. He marries Ashalata out of his mother's wish, but soon falls for Binodini's charms. He neglects his wife, cheats on his friend, and hurts both women.
-
Prosenjit Chatterjee gives a powerful performance in Chokher Bali, as he portrays the flaws and weaknesses of Mahendra's character. He shows his arrogance, passion and impulsiveness, as well as his guilt, regret and remorse. He also has a strong screen presence, which makes him a formidable antagonist.
-
Tota Roy Chowdhury as Behari
-
Tota Roy Chowdhury plays the role of Behari, the loyal and honorable friend of Mahendra. He is also noble, upright and principled. He respects his elders, cares for his friends, and follows his values. He tries to resist Binodini's advances, but eventually falls in love with her. He also tries to help Ashalata, but fails to save her.
-
Tota Roy Chowdhury gives a subtle performance in Chokher Bali, as he portrays the virtues and dilemmas of Behari's character. He shows his dignity, integrity and sincerity, as well as his conflict, hesitation and frustration. He also has a good rapport with Prosenjit Chatterjee, which makes their friendship realistic.
-
Lily Chakravarty as Rajlakshmi
-
Lily Chakravarty plays the role of Rajlakshmi, the mother of Mahendra who arranges his marriage with Ashalata. She is also the one who invites Binodini to stay with them, unaware of her intentions. She is a traditional woman who follows the customs and norms of her society. She loves her son dearly, but also scolds him for his mistakes.
-
-Lily Chakravarty gives a memorable performance in Chokher Bali, as she portrays the authority and affection of Rajlakshmi's character. She shows her sternness, wisdom and concern, as well as her warmth, humor and kindness. She also has a natural bond with Raima Sen, which makes their mother-daughter relationship touching.
-
The reception and impact of the film
-
How was Chokher Bali received by critics and audiences in India and abroad? Let us look at some of the aspects that make Chokher Bali a successful and influential film.
-
The critical acclaim
-
Chokher Bali received rave reviews from critics, who praised its direction, screenplay, cinematography, music, and performances. The film was hailed as a faithful and artistic adaptation of Tagore's novel, as well as a compelling and relevant portrayal of human emotions and relationships.
-
-The film won several awards and nominations, both nationally and internationally. Some of the notable ones are:
-
National Film Award for Best Feature Film in Bengali
-
National Film Award for Best Costume Design
-
National Film Award for Best Art Direction
-
Golden Leopard nomination at the Locarno International Film Festival
-
Official Selection at the Toronto International Film Festival
-
Official Selection at the Chicago International Film Festival
-
Official Selection at the Karlovy Vary International Film Festival
-
Official Selection at the Cairo International Film Festival
-
Official Selection at the London Film Festival
-
The box office success
-
Chokher Bali was also a commercial hit, as it became one of the highest-grossing Bengali films of 2003. The film attracted both urban and rural audiences, who appreciated its story, style and star cast. The film also appealed to non-Bengali audiences, who were exposed to Tagore's literature and Bengali culture.
-
The film was later dubbed into Hindi and released internationally in 2004. The film received a positive response from overseas viewers, who admired its quality and content. The film also generated interest in other Bengali films and filmmakers, who gained more recognition and exposure.
-
The cultural significance
-
Chokher Bali had a lasting impact on the cultural scene of India and beyond. The film revived the interest in Tagore's works, especially his novels, which were often overshadowed by his poems and songs. The film also inspired other adaptations of his novels, such as Noukadubi (2011) by Rituparno Ghosh and Charulata (2012) by Agnidev Chatterjee.
-
The film also contributed to the growth and development of Bengali cinema, which was undergoing a revival in the early 2000s. The film showcased the talent and potential of Bengali filmmakers, actors, technicians and musicians, who created world-class cinema with limited resources. The film also paved the way for more collaborations between Bengali and Hindi cinema industries, which enriched both cultures.
-
Conclusion
-
In conclusion, Chokher Bali is a remarkable film that showcases Tagore's timeless story and Ghosh's artistic vision. The film explores the complexities of human relationships, emotions and morality with sensitivity and sophistication. The film features a stellar cast and crew, who deliver outstanding performances and technical excellence. The film received critical acclaim and commercial success, both in India and abroad. The film also had a lasting impact on the cultural scene of India and beyond.
-
-If you are looking for a film that will make you think, feel and appreciate the beauty of cinema, you should watch Chokher Bali. You can download or stream this film online from legal sources such as YouTube, Amazon Prime Video, or Hotstar. You can also buy or rent this film on DVD or Blu-ray from online or offline stores.
-
Frequently Asked Questions
-
-
Q: What is the meaning of Chokher Bali?
-
A: Chokher Bali literally means sand in the eye, which is a metaphor for a constant irritant or troublemaker. In the film, Binodini is called Chokher Bali by Ashalata, as she becomes a source of disturbance in her life.
-
Q: Is Chokher Bali based on a true story?
-
A: Chokher Bali is based on a novel by Rabindranath Tagore, which is a fictional story inspired by his observations of society and human nature. However, some critics have speculated that Tagore may have drawn some elements from his own life or from his acquaintances.
-
Q: How did Aishwarya Rai Bachchan prepare for her role as Binodini?
-
A: Aishwarya Rai Bachchan prepared for her role as Binodini by reading Tagore's novel, learning Bengali language and culture, and working closely with the director and the co-stars. She also wore authentic costumes and jewelry, and followed the mannerisms and etiquette of a Bengali widow.
-
Q: What is the significance of the boat ride scene in the film?
-
A: The boat ride scene in the film is a pivotal moment in the story, as it marks the turning point in the relationships between the four main characters. It is also a symbolic scene, as it represents the journey of life, where people meet, part, and face various challenges and changes.
-
Q: What is the message of Chokher Bali?
-
-A: Chokher Bali has multiple messages, depending on the perspective of the viewer. Some of the possible messages are:
-
The importance of honesty, loyalty and respect in relationships.
-
The consequences of selfishness, deception and infidelity in relationships.
-
The struggle of women against social oppression and discrimination.
-
The power of love, friendship and forgiveness in overcoming difficulties and differences.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Defraggler Pro 2018 Latest Key Crack Keygen Free Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Defraggler Pro 2018 Latest Key Crack Keygen Free Download.md
deleted file mode 100644
index 77a951d1cc3754e67c98f625adce831cb1f38bf0..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Defraggler Pro 2018 Latest Key Crack Keygen Free Download.md
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
Defraggler Pro 2018: How to Speed Up Your PC with This Powerful Tool
-
If you are looking for a way to improve the performance of your computer, you might want to consider using Defraggler Pro 2018. This is a software that can defragment your hard drive and optimize your file system, making your PC run faster and smoother.
-
Defragmentation is the process of rearranging the data on your hard drive so that it is stored in contiguous blocks. This reduces the amount of time that your computer needs to access and read files, as well as the wear and tear on your hardware. Defragmentation can also free up some disk space by eliminating gaps and fragments.
-
Defraggler Pro 2018 Latest Key Crack Keygen Free Download
Defraggler Pro 2018 is one of the best defragmentation tools on the market. It has several features that make it stand out from other similar software, such as:
-
-
It can defragment individual files, folders, or the entire drive.
-
It can defragment free space to prevent future fragmentation.
-
It can move large files to the end of the drive to improve access speed.
-
It can analyze your drive and show you a detailed report of its condition and fragmentation level.
-
It can run in the background or schedule automatic defragmentation at a convenient time.
-
It supports NTFS and FAT32 file systems, as well as SSDs and external drives.
-
-
To use Defraggler Pro 2018, you need to download it from the official website and install it on your PC. You can then launch it and select the drive or file that you want to defragment. You can also choose from different options and settings to customize your defragmentation process. Once you start the defragmentation, you can monitor its progress and see how much space and time you are saving.
-
-Defraggler Pro 2018 is not free software, but you can try it for 30 days without any limitations. If you want to continue using it after the trial period, you need to purchase a license key that will activate the full version. The license key costs $24.95 and it is valid for one year and one PC. You can also get discounts if you buy multiple licenses or renew your subscription.
-
If you want to speed up your PC and make it more efficient, you should definitely give Defraggler Pro 2018 a try. It is a powerful tool that can optimize your hard drive and improve your computer's performance. You can download it from here and start your free trial today.
-
-
Why You Need to Defragment Your Hard Drive
-
Many people do not realize the importance of defragmenting their hard drive regularly. They might think that it is a complicated or unnecessary task that does not affect their computer's performance. However, this is not true. Defragmenting your hard drive can have many benefits for your PC, such as:
-
-
It can speed up your boot time and application launch time.
-
It can reduce the risk of data corruption and loss.
-
It can extend the lifespan of your hard drive and prevent overheating.
-
It can improve your system stability and security.
-
-
When you use your computer, you create, modify, delete, and move files constantly. This causes your hard drive to become fragmented over time. Fragmentation means that your files are split into many pieces and scattered across different locations on your disk. This makes it harder for your computer to find and access them, resulting in slower performance and more disk activity.
-
Defragmenting your hard drive can solve this problem by reorganizing your files and placing them in contiguous blocks. This way, your computer can read and write them faster and more efficiently, saving you time and energy. Defragmenting your hard drive can also free up some disk space by eliminating gaps and fragments that are not used by any files.
-
-
How to Use Defraggler Pro 2018 Effectively
-
Defraggler Pro 2018 is a user-friendly and versatile software that can help you defragment your hard drive easily and quickly. It has a simple and intuitive interface that allows you to perform various tasks with just a few clicks. Here are some tips on how to use Defraggler Pro 2018 effectively:
-
-
-
Analyze your drive before defragmenting it. This will show you how much fragmentation there is and how much space and time you can save by defragmenting it. You can also see a graphical representation of your drive's condition and fragmentation level.
-
Select the appropriate mode for defragmenting your drive. You can choose between Quick Defrag, which is faster but less thorough, or Full Defrag, which is slower but more comprehensive. You can also select Defrag Free Space, which will defragment the empty space on your drive to prevent future fragmentation.
-
Customize your defragmentation process according to your needs and preferences. You can change the priority of the defragmentation process, the amount of system resources it uses, the frequency of updates, and the actions to take after completion. You can also exclude certain files or folders from being defragmented if you want to.
-
Schedule automatic defragmentation at a convenient time. You can set Defraggler Pro 2018 to run automatically at a specific time or interval, such as daily, weekly, monthly, or when the system is idle. This way, you can keep your hard drive optimized without having to remember or interfere with it.
-
-
Defraggler Pro 2018 is a powerful tool that can make a big difference in your computer's performance and health. It is easy to use and offers many options and features that suit different needs and situations. You can download it from here and start your free trial today.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/APKMirror vs Google Play Store Which One is Better for Android 4.0 Users?.md b/spaces/1phancelerku/anime-remove-background/APKMirror vs Google Play Store Which One is Better for Android 4.0 Users?.md
deleted file mode 100644
index 8fdf76db26f8063998bff6787c4b748ab1d5df11..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/APKMirror vs Google Play Store Which One is Better for Android 4.0 Users?.md
+++ /dev/null
@@ -1,147 +0,0 @@
-
-
How to Install Google Play Store on Android 4.0 Devices
-
If you have an Android device that runs on version 4.0 (Ice Cream Sandwich), you may have noticed that it does not come with the Google Play Store app pre-installed. This means that you cannot access the millions of apps, games, books, movies, and music that are available on the official app store for Android devices.
-
However, there is a way to install Google Play Store on your Android 4.0 device using a third-party website called APKMirror. APKMirror is a trusted source of APK files, which are the installation packages for Android apps. By downloading and installing the latest version of Google Play Store from APKMirror, you can enjoy all the benefits of having the official app store on your older device.
In this article, we will show you how to install Google Play Store on your Android 4.0 device using APKMirror. We will also show you how to use Google Play Store on your device and how to customize and secure it.
-
Requirements for Installing Google Play Store
-
Before you start installing Google Play Store on your Android 4.0 device, you need to make sure that your device meets the following requirements:
-
-
Your device must be compatible with Android 4.0 or higher. You can check your device's Android version by going to Settings > About phone > Android version.
-
Your device must have an internet connection, either Wi-Fi or mobile data.
-
Your device must allow installation of apps from unknown sources. You can enable this option by going to Settings > Security > Unknown sources.
-
-
Steps for Installing Google Play Store
-
Once you have met the requirements above, you can follow these steps to install Google Play Store on your Android 4.0 device:
Search for "Google Play Store" in the search box and tap on the result.
-
Select the latest version of Google Play Store that is compatible with your device's architecture (arm or x86) and DPI (dots per inch). You can check your device's architecture and DPI by using an app like CPU-Z.
-
Tap on the "Download APK" button and wait for the file to download to your device.
-
Once the download is complete, open the file and tap on "Install" to start the installation process.
-
Wait for the installation to finish and then tap on "Open" to launch Google Play Store.
-
-
Congratulations! You have successfully installed Google Play Store on your Android 4.0 device. You can now access and use Google Play Store as you would on any other Android device.
-
Troubleshooting Tips for Installing Google Play Store
-
Sometimes, you may encounter some issues while installing or using Google Play Store on your Android 4.0 device. Here are some common problems and their solutions:
-
-
If you get an error message saying "Parse error" or "There was a problem parsing the package" when you try to open the APK file, it means that the file is corrupted or incompatible with your device. Try downloading the file again from a different source or choose a different version that matches your device's specifications.
-
If you get an error message saying "App not installed" or "Installation failed" when you try to install the APK file, it means that there is not enough storage space on your device or that there is a conflict with an existing app. Try freeing up some space on your device by deleting unwanted files or apps, or uninstall any previous versions of Google Play Store that may be installed on your device.
-
If you get an error message saying "Google Play Services are updating" or "Google Play Services has stopped" when you try to use Google Play Store, it means that the app depends on another app called Google Play Services, which needs to be updated or installed. Try updating or installing Google Play Services from APKMirror using the same steps as above, or wait for the app to update automatically.
-
If you get an error message saying "This app is incompatible with your device" or "This app is not available in your country" when you try to download or install an app from Google Play Store, it means that the app is not designed for your device's hardware or software, or that the app is restricted by the developer or the government in your region. Try searching for an alternative app that offers similar features or functions, or use a VPN service to change your location and bypass the restrictions.
-
-
How to Use Google Play Store on Android 4.0 Devices
-
Now that you have installed Google Play Store on your Android 4.0 device, you can start using it to browse, download, update, and manage apps on your device. Google Play Store offers a variety of features and functions that make it easy and convenient to find and use apps on your device.
-
How to install apkmirror app on android 4.0 devices
-Download google play store apk for android 4.0 from apkmirror
-Best apps for android 4.0 available on google play store and apkmirror
-Google play store vs apkmirror: which one is better for android 4.0 users
-Apkmirror installer: a helper app to install apkm, xapk, and apks files on android 4.0
-Google play store (android tv) 36.0.15 apk download from apkmirror
-Apkmirror: a safe and trusted source for google play store apps on android 4.0
-How to update google play store on android 4.0 using apkmirror
-How to fix google play store errors on android 4.0 with apkmirror
-How to sideload google play store apps on android 4.0 using apkmirror
-How to backup and restore google play store apps on android 4.0 with apkmirror
-How to uninstall google play store updates on android 4.0 using apkmirror
-How to enable dark mode on google play store for android 4.0 with apkmirror
-How to download and install google play services on android 4.0 from apkmirror
-How to get the latest version of google play store on android 4.0 with apkmirror
-How to install google play store modded apk on android 4.0 from apkmirror
-How to download and install google play games on android 4.0 from apkmirror
-How to use google play store gift cards on android 4.0 with apkmirror
-How to download and install google play music on android 4.0 from apkmirror
-How to download and install google play books on android 4.0 from apkmirror
-How to download and install google play movies & tv on android 4.0 from apkmirror
-How to download and install google play newsstand on android 4.0 from apkmirror
-How to download and install google play podcasts on android 4.0 from apkmirror
-How to download and install google play protect on android 4.0 from apkmirror
-How to download and install google play rewards on android 4.0 from apkmirror
-How to download and install google assistant on android 4.0 from apkmirror
-How to download and install google lens on android 4.0 from apkmirror
-How to download and install google photos on android 4.0 from apkmirror
-How to download and install google drive on android 4.0 from apkmirror
-How to download and install google docs on android 4.0 from apkmirror
-How to download and install google sheets on android 4.0 from apkmirror
-How to download and install google slides on android 4.0 from apkmirror
-How to download and install google forms on android 4.0 from apkmirror
-How to download and install google calendar on android 4.0 from apkmirror
-How to download and install google keep on android 4.0 from apkmirror
-How to download and install google tasks on android 4.0 from apkmirror
-How to download and install google contacts on android 4.0 from apkmirror
-How to download and install google maps on android 4.0 from apkmirror
-How to download and install google earth on android 4.0 from apkmirror
-How to download and install google street view on android 4.0 from apkmirror
-How to download and install google translate on android 4.0 from apkmirror
-How to download and install google chrome on android 4.0 from apkmirror
-How to download and install gmail on android 4.0 from apkmirror
-How to download and install youtube on android 4.0 from apkmirror
-How to download and install youtube music on android 4.0 from apkmirror
-How to download and install youtube kids on android 4.0 from apkmirror
-How to download and install youtube studio on android 4.0 from apkmirror
-How to download and install youtube tv on android 4.0 from apkmirror
-
How to Browse and Download Apps from Google Play Store
-
One of the main functions of Google Play Store is to allow you to browse and download apps from a huge collection of categories, such as games, social, education, entertainment, and more. You can also filter apps by ratings, reviews, popularity, and other criteria. Here is how to browse and download apps from Google Play Store:
-
-
Open Google Play Store on your device and tap on the menu icon (three horizontal lines) at the top left corner of the screen.
-
Select a category that interests you, such as "Games" or "Apps". You can also tap on the search icon (magnifying glass) at the top right corner of the screen and enter a keyword or phrase related to the app you are looking for.
-
Browse through the list of apps that match your criteria and tap on the one that you want to download. You can also tap on the app's name or icon to view more details about it, such as its description, screenshots, ratings, reviews, and permissions.
-
Tap on the "Install" button to start downloading and installing the app on your device. You may need to accept some terms and conditions before proceeding. You can also tap on the "Add to Wishlist" button to save the app for later.
-
Wait for the download and installation to complete and then tap on "Open" to launch the app. You can also find the app in your device's app drawer or home screen.
-
-
How to Update and Manage Apps from Google Play Store
-
Another function of Google Play Store is to allow you to update and manage apps on your device. Updating apps ensures that they have the latest features, bug fixes, and security patches. Managing apps allows you to uninstall, disable, or move apps from your device's internal storage to an external storage (such as an SD card). Here is how to update and manage apps from Google Play Store:
-
-
Open Google Play Store on your device and tap on the menu icon (three horizontal lines) at the top left corner of the screen.
-
Select "My apps & games" to view a list of apps that are installed on your device. You can also tap on the "Library" tab to view a list of apps that you have previously installed or purchased but are not currently on your device.
-
To update an app, tap on the "Update" button next to the app's name. You can also tap on the "Update all" button at the top of the screen to update all apps at once. You may need to accept some terms and conditions before proceeding.
-
To manage an app, tap on the app's name or icon to open its details page. You can then tap on the "Uninstall" button to remove the app from your device, or tap on the "Disable" button to prevent the app from running in the background. You can also tap on the "Storage" option to view and change the app's storage location, or tap on the "Permissions" option to view and change the app's access to your device's features and data.
-
-
How to Customize and Secure Google Play Store
-
The last function of Google Play Store is to allow you to customize and secure it according to your preferences and needs. Customizing Google Play Store allows you to change its language, notifications, parental controls, and backup options. Securing Google Play Store allows you to protect your account and device from malicious apps and unauthorized access. Here is how to customize and secure Google Play Store:
-
-
Open Google Play Store on your device and tap on the menu icon (three horizontal lines) at the top left corner of the screen.
-
Select "Settings" to access the various options for customizing and securing Google Play Store.
-
To customize Google Play Store, you can change the following settings:
-
General: You can change the language of Google Play Store, enable or disable auto-update apps, enable or disable auto-add widgets, and enable or disable smart downloads.
-
Notifications: You can enable or disable notifications for updates, pre-registrations, deals, rewards, and more.
-
Family: You can set up parental controls to restrict the content that is available for download based on age ratings, categories, and ratings systems. You can also create a family group to share apps, games, books, and movies with your family members.
-
Backup & restore: You can enable or disable backup of your app data to your Google account, and restore your app data from a previous backup.
-
-
-
To secure Google Play Store, you can change the following settings:
-
Account: You can sign in or out of your Google account, manage your payment methods, subscriptions, rewards, and order history.
-
Security: You can enable or disable Play Protect, which scans your device for harmful apps and warns you before installing them. You can also enable or disable app verification, which checks if apps are safe before installing them from sources other than Google Play Store.
-
Privacy: You can manage your personal information, activity controls, ad settings, and location settings.
-
-
-
-
Conclusion
-
In this article, we have shown you how to install Google Play Store on your Android 4.0 device using APKMirror. We have also shown you how to use Google Play Store on your device and how to customize and secure it. By installing Google Play Store on your older device, you can enjoy all the benefits of having access to millions of apps, games, books, movies, and music that are available on the official app store for Android devices.
-
We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
FAQs
-
Here are some frequently asked questions about installing Google Play Store on Android 4.0 devices:
-
Q: Is it safe to install Google Play Store from APKMirror?
-
A: APKMirror is a reputable website that hosts APK files for Android apps. It verifies the authenticity and integrity of the files before uploading them. However, as with any third-party source, there is always a risk of downloading malicious or infected files. Therefore, we recommend that you always scan the files with a reliable antivirus software before opening them.
-
Q: Can I install Google Play Store on any Android 4.0 device?
-
A: Not necessarily. Some Android 4.0 devices may not be compatible with Google Play Store due to hardware or software limitations. For example, some devices may not have enough storage space or RAM to run Google Play Store smoothly. Some devices may also have custom ROMs or firmware that may interfere with Google Play Store's functionality. Therefore, we advise that you check your device's compatibility before installing Google Play Store from APKMirror.
-
Q: How can I update Google Play Store on my Android 4.0 device?
-
A: Google Play Store usually updates itself automatically when a new version is available. However, if you want to update it manually, you can follow the same steps as above to download and install the latest version of Google Play Store from APKMirror. Alternatively, you can also go to Settings > Apps > Google Play Store > Menu > Uninstall updates and then reinstall the updates from Google Play Store itself.
-
Q: How can I uninstall Google Play Store from my Android 4.0 device?
-
A: If you want to uninstall Google Play Store from your Android 4.0 device, you can follow these steps:
-
-
Go to Settings > Apps > Google Play Store and tap on "Uninstall" or "Disable".
-
Go to Settings > Security > Device administrators and uncheck "Google Play Services".
-
Go to Settings > Apps > Google Play Services and tap on "Uninstall" or "Disable".
-
Reboot your device.
-
-
Note that uninstalling Google Play Store may affect the performance and functionality of some apps that depend on it. You may also lose access to some features and services that are provided by Google Play Store, such as app updates, backup, and security.
-
Q: What are some alternatives to Google Play Store for Android 4.0 devices?
-
A: If you do not want to install Google Play Store on your Android 4.0 device, or if you are unable to do so, you can still use some alternative app stores that are compatible with older devices. Some of these app stores are:
-
-
Amazon Appstore: This is the official app store for Amazon devices, such as Kindle Fire and Fire TV. It offers a variety of apps, games, books, and music that are curated by Amazon. It also features some exclusive apps and deals that are not available on other app stores.
-
F-Droid: This is an open-source app store that hosts free and open-source apps for Android devices. It offers a range of apps that are focused on privacy, security, and customization. It also allows you to browse and install apps from different repositories and sources.
-
Aptoide: This is a community-driven app store that allows users to create and manage their own app stores. It offers a large collection of apps and games that are uploaded by users and developers. It also features some apps that are not available on other app stores.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Zero RPG Kit for Free and Start Making Pixel Art Adventures.md b/spaces/1phancelerku/anime-remove-background/Download Zero RPG Kit for Free and Start Making Pixel Art Adventures.md
deleted file mode 100644
index 15d139027b3dd3f04ddabc28a0d6b1c658741f0f..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Zero RPG Kit for Free and Start Making Pixel Art Adventures.md
+++ /dev/null
@@ -1,164 +0,0 @@
-
-
Zero RPG Kit Download: How to Create Your Own Role-Playing Game with Unity
-
Have you ever dreamed of creating your own role-playing game (RPG) like Final Fantasy, The Legend of Zelda, or Skyrim? If so, you might be interested in Zero RPG Kit, a powerful and versatile asset for Unity that allows you to create your own RPG in minutes. In this article, we will show you what Zero RPG Kit is, how to download and install it, and how to use it to create your own RPG.
Zero RPG Kit is a complete solution for creating 2D or 3D RPGs with Unity. It provides you with everything you need to make your own RPG, such as:
-
-
A flexible and modular system for creating characters, enemies, items, skills, quests, dialogues, and more.
-
A rich and diverse collection of assets, such as sprites, models, animations, sounds, music, UI elements, and icons.
-
A powerful and user-friendly editor for designing your own maps and levels.
-
A comprehensive and customizable framework for managing your game logic, such as combat, inventory, dialogue, saving/loading, etc.
-
A cross-platform support for building and deploying your game to Windows, Mac, Linux, Android, iOS, WebGL, and more.
-
-
Features and benefits of Zero RPG Kit
-
Some of the features and benefits of using Zero RPG Kit are:
-
-
It saves you time and money by providing you with a ready-made solution for creating your own RPG.
-
It gives you full control and flexibility over your game design by allowing you to customize every aspect of your game.
-
It helps you create immersive and engaging games by offering you a variety of options and features for your game mechanics.
-
It supports both 2D and 3D graphics by allowing you to switch between them easily.
-
It enables you to create multiplayer games by supporting online networking and chat features.
-
-
Requirements and compatibility of Zero RPG Kit
-
To use Zero RPG Kit, you need:
-
zero rpg kit unity asset store
-zero rpg kit free download
-zero rpg kit tutorial
-zero rpg kit review
-zero rpg kit documentation
-zero rpg kit demo
-zero rpg kit forum
-zero rpg kit update
-zero rpg kit features
-zero rpg kit license
-zero rpg kit support
-zero rpg kit coupon code
-zero rpg kit system requirements
-zero rpg kit alternatives
-zero rpg kit vs ork framework
-zero rpg kit vs invector controller
-zero rpg kit vs game creator
-zero rpg kit vs adventure creator
-zero rpg kit vs playmaker
-zero rpg kit vs bolt
-zero rpg kit vs corgi engine
-zero rpg kit vs pixel adventure
-zero rpg kit vs pixel crusade
-zero rpg kit vs pixel hero
-zero rpg kit vs pixel dungeon
-zero rpg kit vs pixel art maker
-zero rpg kit vs pixel platformer
-zero rpg kit vs pixel quest
-zero rpg kit vs pixel roguelike
-zero rpg kit vs pixel story
-zero rpg kit vs pixel fantasy world
-zero rpg kit vs pixel action game
-zero rpg kit vs pixel adventure game
-zero rpg kit vs pixel horror game
-zero rpg kit vs pixel survival game
-zero rpg kit vs pixel sandbox game
-zero rpg kit vs pixel simulation game
-zero rpg kit vs pixel strategy game
-zero rpg kit vs pixel puzzle game
-zero rpg kit vs pixel card game
-how to use zero rpg kit in unity
-how to make a game with zero rpg kit
-how to customize zero rpg kit
-how to add characters to zero rpg kit
-how to add items to zero rpg kit
-how to add quests to zero rpg kit
-how to add enemies to zero rpg kit
-how to add skills to zero rpg kit
-how to add dialogue to zero rpg kit
-how to add music to zero rpg kit
-
-
A computer that meets the minimum requirements for running Unity.
-
A licensed version of Unity 2019.4 or higher.
-
A license for Zero RPG Kit that suits your needs and budget.
-
-
Zero RPG Kit is compatible with:
-
-
Most popular platforms, such as Windows, Mac, Linux, Android, iOS, WebGL, etc.
-
Most popular input devices, such as keyboard, mouse, touch screen, gamepad, etc.
-
Most popular asset formats, such as PNG, JPG, FBX, WAV, MP3, etc.
-
-
How to download and install Zero RPG Kit
-
To download and install Zero RPG Kit, follow these steps:
-
Step 1: Visit the official website of Zero RPG Kit
-
The official website of Zero RPG Kit is https://zerorpgkit.com/. Here you can find more information about Zero RPG Kit, such as features, screenshots, videos, documentation, and support.
-
Step 2: Choose your preferred license and payment method
-
Zero RPG Kit offers three types of licenses: Personal, Plus, and Pro. Each license has different features and prices. You can compare them on the website and choose the one that best suits your needs and budget. You can also choose to pay monthly or yearly.
-
To purchase a license, you need to create an account on the website and select your payment method. You can pay with PayPal or credit card. Once you complete the payment, you will receive an email with your license key and a download link.
-
Step 3: Download the package and unzip it
-
Click on the download link in the email and save the package to your computer. The package is a ZIP file that contains the Zero RPG Kit asset and some sample projects. You need to unzip the file to extract its contents.
-
Step 4: Import the package into your Unity project
-
Open Unity and create a new project or open an existing one. Then, go to Assets > Import Package > Custom Package and select the Zero RPG Kit asset file. A window will pop up showing you the contents of the package. Click on Import to import all the files into your project.
-
How to use Zero RPG Kit to create your own RPG
-
Now that you have downloaded and installed Zero RPG Kit, you are ready to use it to create your own RPG. Here are the steps to follow:
-
Step 1: Customize the settings and assets of Zero RPG Kit
-
The first thing you need to do is to customize the settings and assets of Zero RPG Kit according to your game design. You can do this by using the Zero RPG Kit Manager, which is a window that allows you to access and modify all the options and features of Zero RPG Kit.
-
To open the Zero RPG Kit Manager, go to Window > Zero RPG Kit > Manager. Here you can see different tabs, such as General, Database, Editor, Framework, Network, etc. Each tab has different settings and assets that you can change and edit.
-
For example, in the General tab, you can change the name, version, icon, resolution, quality, language, and other general settings of your game. In the Database tab, you can create and edit your own characters, enemies, items, skills, quests, dialogues, and more. In the Editor tab, you can customize the appearance and functionality of the map editor. And so on.
-
You can also import your own assets into Zero RPG Kit by dragging and dropping them into the appropriate folders in the Project window. For example, if you want to use your own sprites for your characters, you can drag them into the Sprites folder. If you want to use your own models for your enemies, you can drag them into the Models folder. And so on.
-
Step 2: Design your own maps and levels with the built-in editor
-
The next thing you need to do is to design your own maps and levels with the built-in editor of Zero RPG Kit. The editor is a powerful and user-friendly tool that allows you to create 2D or 3D maps and levels with ease.
-
To open the editor, go to Window > Zero RPG Kit > Editor. Here you can see a toolbar with different buttons and options for creating and editing your maps and levels. You can also see a grid where you can place tiles, objects, events, triggers, lights, cameras, etc.
-
To create a map or level, you need to follow these steps:
-
-
Select a tileset from the Tilesets window. A tileset is a collection of tiles that have different shapes and textures. You can use the default tilesets provided by Zero RPG Kit or import your own tilesets.
-
Select a tile from the tileset and drag it onto the grid. You can also use the brush tool to paint multiple tiles at once. You can also use the eraser tool to erase tiles from the grid.
-
Repeat this process until you fill up the grid with tiles according to your map or level design.
-
Select an object from the Objects window. An object is anything that is not a tile, such as a character, an enemy, an item, a door, a chest, etc. You can use the default objects provided by Zero RPG Kit or import your own objects.
-
Select an object and drag it onto the grid. You can also use the rotate tool to rotate it or the scale tool to resize it.
-
Repeat this process until you place all the objects you need on your grid according to your map or level design.
-
Select an event from the Events window. An event is anything that happens when the player interacts with an object, such as a dialogue, a battle, a cutscene, etc. You can use the default events provided by Zero RPG Kit or create your own events.
-
Select an event and drag it onto the grid. You can also use the link tool to link it to an object or another event.
-
Repeat this process until you add all the events you need on your map or level.
-
Select a trigger from the Triggers window. A trigger is anything that activates an event when the player enters or exits a certain area, such as a teleporter, a switch, a trap, etc. You can use the default triggers provided by Zero RPG Kit or create your own triggers.
-
Select a trigger and drag it onto the grid. You can also use the link tool to link it to an event or another trigger.
-
Repeat this process until you add all the triggers you need on your map or level.
-
Select a light from the Lights window. A light is anything that illuminates your map or level, such as a sun, a moon, a lamp, a fire, etc. You can use the default lights provided by Zero RPG Kit or import your own lights.
-
Select a light and drag it onto the grid. You can also use the rotate tool to rotate it or the scale tool to resize it.
-
Repeat this process until you add all the lights you need on your map or level.
-
Select a camera from the Cameras window. A camera is anything that controls how your map or level is viewed by the player, such as a perspective, an orthographic, a follow, etc. You can use the default cameras provided by Zero RPG Kit or create your own cameras.
-
Select a camera and drag it onto the grid. You can also use the rotate tool to rotate it or the scale tool to resize it.
-
Repeat this process until you add all the cameras you need on your map or level.
-
-
You can also use the preview button to test your map or level in play mode. You can also use the save button to save your map or level as a scene file in your project folder.
-
Step 3: Add your own characters, enemies, items, and quests with the easy-to-use tools
-
The next thing you need to do is to add your own characters, enemies, items, and quests with the easy-to-use tools of Zero RPG Kit. These tools are windows that allow you to create and edit these elements of your game with simple forms and fields.
-
To open these tools, go to Window > Zero RPG Kit > Tools. Here you can see different windows, such as Character Creator, Enemy Creator, Item Creator, Quest Creator, etc. Each window has different tabs and options for creating and editing these elements of your game.
-
For example, in the Character Creator window, you can create and edit your own characters by filling in their name, description, stats, skills, inventory, equipment, appearance, animations, sounds, etc. In the Enemy Creator window, you can create and edit your own enemies by filling in their name, description, stats, skills, loot, appearance, animations, sounds, etc. In the Item Creator window, you can create and edit your own items by filling in their name, description, stats, type, icon, etc. In the Quest Creator window, you can create and edit your own quests by filling in their name, description, objectives, rewards, conditions, etc.
-
You can also use the preview button to test your characters, enemies, items, and quests in play mode. You can also use the save button to save them as scriptable objects in your project folder.
-
Step 4: Test and debug your game with the integrated console and profiler
-
The next thing you need to do is to test and debug your game with the integrated console and profiler of Zero RPG Kit. These are tools that allow you to monitor and optimize the performance and quality of your game.
-
To open these tools, go to Window > Zero RPG Kit > Tools. Here you can see different windows, such as Console and Profiler. Each window has different tabs and options for testing and debugging your game.
-
For example, in the Console window, you can see the output of your game, such as messages, errors, warnings, etc. You can also use commands to execute functions or change variables in your game. In the Profiler window, you can see the statistics of your game, such as CPU usage, memory usage, frame rate, etc. You can also use graphs and charts to analyze the performance of your game.
-
You can also use the play button to run your game in play mode. You can also use the pause button to pause your game and inspect its state. You can also use the step button to advance your game frame by frame.
-
Step 5: Build and deploy your game to your desired platform
-
The final thing you need to do is to build and deploy your game to your desired platform with Zero RPG Kit. This is the process of exporting your game as an executable file that can run on different devices and platforms.
-
To build and deploy your game, follow these steps:
-
-
Select a platform from the Platform window. A platform is a device or system that can run your game, such as Windows, Mac, Linux, Android, iOS, WebGL, etc. You can use the default platforms provided by Unity or add your own platforms.
-
Select a platform and click on the build button. A window will pop up asking you to choose a location and a name for your build file. You can also choose other options such as compression, resolution, quality, etc.
-
Click on build to start the building process. This may take some time depending on the size and complexity of your game.
-
Once the building process is done, you will see a message saying that your build is complete. You can also see the location and name of your build file.
-
Copy or move your build file to your target device or system. For example, if you built your game for Windows, you can copy or move it to a Windows computer. If you built your game for Android, you can copy or move it to an Android device.
-
Run your build file on your target device or system. For example, if you built your game for Windows, you can double-click on it to run it on a Windows computer. If you built your game for Android, you can tap on it to run it on an Android device.
-
-
Congratulations! You have successfully created your own RPG with Zero RPG Kit!
-
Conclusion and FAQs
-
In this article, we have shown you what Zero RPG Kit is, how to download and install it, and how to use it to create your own RPG. We hope that you have found this article helpful and informative. If you have any questions or feedback, please feel free to contact us or leave a comment below. Here are some frequently asked questions about Zero RPG Kit:
-
Q: How much does Zero RPG Kit cost?
-
A: Zero RPG Kit offers three types of licenses: Personal, Plus, and Pro. The Personal license costs $29 per month or $299 per year. The Plus license costs $49 per month or $499 per year. The Pro license costs $99 per month or $999 per year. You can also get a free trial for 14 days.
-
Q: What are the differences between the licenses?
-
A: The main differences between the licenses are the number of projects, users, and features that you can use with Zero RPG Kit. The Personal license allows you to use Zero RPG Kit for one project and one user. The Plus license allows you to use Zero RPG Kit for three projects and three users. The Pro license allows you to use Zero RPG Kit for unlimited projects and users. The Pro license also gives you access to more features, such as multiplayer support, source code access, priority support, etc.
-
Q: Can I use Zero RPG Kit for commercial purposes?
-
A: Yes, you can use Zero RPG Kit for commercial purposes as long as you have a valid license and you follow the terms and conditions of Zero RPG Kit. You can sell or distribute your games made with Zero RPG Kit without paying any royalties or fees to Zero RPG Kit.
-
Q: Can I modify or extend Zero RPG Kit?
-
A: Yes, you can modify or extend Zero RPG Kit as much as you want. You can add your own features, assets, scripts, etc. to Zero RPG Kit. You can also use other assets or plugins from the Unity Asset Store or other sources with Zero RPG Kit. However, if you want to access the source code of Zero RPG Kit, you need to have a Pro license.
-
Q: Where can I find more tutorials and resources for Zero RPG Kit?
-
A: You can find more tutorials and resources for Zero RPG Kit on the official website of Zero RPG Kit, which is https://zerorpgkit.com/. Here you can find the documentation, videos, forums, blogs, etc. for Zero RPG Kit. You can also join the Discord server of Zero RPG Kit, which is https://discord.gg/zerorpgkit. Here you can chat with other users and developers of Zero RPG Kit, ask questions, share ideas, etc.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/AI-ZTH-03-23/3.HTML5-Aframe-3dMap-Flight/README.md b/spaces/AI-ZTH-03-23/3.HTML5-Aframe-3dMap-Flight/README.md
deleted file mode 100644
index cc242cc5fca2c61e00f83936813d157dd313e246..0000000000000000000000000000000000000000
--- a/spaces/AI-ZTH-03-23/3.HTML5-Aframe-3dMap-Flight/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: 3.HTML5-3D-VR-Aframe-Map-Land
-emoji: 🗺️VR🏞️
-colorFrom: blue
-colorTo: green
-sdk: static
-pinned: false
-license: mit
-duplicated_from: awacke1/HTML5-Aframe-3dMap-Flight
----
-
-🏷️ **Title:** HTML5-3D-VR-Aframe-Map 📚3D-VR
-
-📋 **Description:** This is a fun 📚3D-VR simulator that shows a map 🗺️ with ⌨️ WASD keyboard motion controls. You can explore a 3D landscape 🏞️ using Aframe.
-
-🧐 **Details:**
-
-- **HTML5:** Refers to the version of the HTML (Hypertext Markup Language) used to create the web page on which the 3D-VR-Aframe-Map is hosted.
-
-- **3D:** Refers to the three-dimensional nature of the map in the 3D-VR-Aframe-Map simulator.
-
-- **VR:** Refers to the virtual reality aspect of the 3D-VR-Aframe-Map simulator. Users can immerse themselves in the virtual environment and interact with it using VR headsets.
-
-- **Aframe:** Refers to the web framework used to create the 3D-VR-Aframe-Map simulator. Aframe is a popular framework for creating virtual reality experiences on the web.
-
-- **Map:** Refers to the representation of geographic or spatial data in a visual form. In the 3D-VR-Aframe-Map simulator, users can explore a 3D landscape using motion controls and a map interface.
-
-💻 **Code Snippet:**
-
-```html
-<!-- Minimal A-Frame scene: a flat terrain plane, a sky, and a camera with WASD + mouse-look controls -->
-<html>
-  <head>
-    <title>HTML5-3D-VR-Aframe-Map 📚3D-VR</title>
-    <script src="https://aframe.io/releases/1.4.0/aframe.min.js"></script>
-  </head>
-  <body>
-    <a-scene>
-      <a-plane position="0 0 -4" rotation="-90 0 0" width="100" height="100" color="#7BC8A4"></a-plane>
-      <a-sky color="#ECECEC"></a-sky>
-      <a-entity camera look-controls wasd-controls position="0 1.6 0"></a-entity>
-    </a-scene>
-  </body>
-</html>
-```
-
-🔑 Acronyms:
-
-HTML: Hypertext Markup Language, a coding language used to create web pages.
-VR: Virtual Reality, an immersive experience that simulates a real environment.
-Aframe: A web framework used to create virtual reality experiences on the web.
-WASD: A set of four keyboard keys that are commonly used in video games for motion controls.
\ No newline at end of file
diff --git a/spaces/AIWaves/SOP_Generation-single/app.py b/spaces/AIWaves/SOP_Generation-single/app.py
deleted file mode 100644
index 093f8584b4e4e3476d379b5dc2cb23977ebaea68..0000000000000000000000000000000000000000
--- a/spaces/AIWaves/SOP_Generation-single/app.py
+++ /dev/null
@@ -1,395 +0,0 @@
-import sys
-import os
-import argparse
-from gradio_base import WebUI, UIHelper, PORT, HOST, Client
-from gradio_config import GradioConfig as gc
-from typing import List, Tuple, Any
-import gradio as gr
-import time
-from Agent import Agent
-from design_states import get_desgin_states,get_cot_result
-from gen_utils import *
-from utils import get_embedding,cos_sim
-import torch
-import json
-import openai
-
-def get_embedding(sentence,api_key):
- openai.api_key = api_key
- embedding_model = openai.Embedding
- embed = embedding_model.create(
- model="text-embedding-ada-002",
- input=sentence
- )
- embed = embed["data"][0]["embedding"]
- embed = torch.tensor(embed,dtype=torch.float32)
- if len(embed.shape)==1:
- embed = embed.unsqueeze(0)
- return embed
-
-class GeneralUI(WebUI):
- def render_and_register_ui(self):
- # bind the agent with avatar
- self.agent_name:list = [self.cache["agents_name"]] if isinstance(self.cache["agents_name"], str) else self.cache['agents_name']
- gc.add_agent(self.agent_name)
-
- def handle_message(self, history, state, agent_name, token, node_name):
- if state % 10 == 0:
- self.data_history.append({agent_name: token})
- elif state % 10 == 1:
- # Same state. Need to add new bubble in same bubble.
- self.data_history[-1][agent_name] += token
- elif state % 10 == 2:
- # New state. Need to add new bubble.
- history.append([None, ""])
- self.data_history.clear()
- self.data_history.append({agent_name: token})
- else:
- assert False, "Invalid state."
- render_data = self.render_bubble(history, self.data_history, node_name, render_node_name= True)
- return render_data
-
- def __init__(
- self,
- client_cmd: list,
- socket_host: str = HOST,
- socket_port: int = PORT,
- bufsize: int = 1024,
- ui_name: str = "GeneralUI"
- ):
- super(GeneralUI, self).__init__(client_cmd, socket_host, socket_port, bufsize, ui_name)
- self.first_recieve_from_client()
- self.current_node_name = ""
- self.data_history = None
- for _ in ['agents_name', 'api_key']:
- assert _ in self.cache
-
- def generate_sop(self,api_key,proxy,target):
- os.environ["API_KEY"] = api_key
- # os.environ["PROXY"] = proxy
- self.design_assistant = "An assistant that can help users create content such as articles, blogs, advertising copy, etc"
- self.tutor = "A tutor who provides personalized learning resources for students to help them understand complex concepts and problems"
- self.online_medical_consultant = "An online medical consultant who offers preliminary medical advice to patients and answers common questions about diseases, symptoms, and treatments."
- self.online_legal_consultant = "An online legal advisor who can respond to inquiries related to legal matters, providing basic legal information and advice."
- self.online_financial_advisor = "An online financial advisor who can analyze financial markets and data, offering investment advice and market forecasts to users."
- self.virtual_tour_guide = "A virtual tour guide providing destination information, travel recommendations, and virtual travel experiences for travelers."
- self.design_assistant = get_embedding(self.design_assistant,api_key)
- self.tutor = get_embedding(self.tutor,api_key)
- self.online_medical_consultant = get_embedding(self.online_medical_consultant,api_key)
- self.online_legal_consultant = get_embedding(self.online_legal_consultant,api_key)
- self.online_financial_advisor = get_embedding(self.online_financial_advisor,api_key)
- self.virtual_tour_guide = get_embedding(self.virtual_tour_guide,api_key)
- self.embeddings = torch.cat([self.design_assistant,self.tutor,self.online_medical_consultant,self.online_legal_consultant,self.online_financial_advisor,self.virtual_tour_guide],dim = 0)
- self.SOP["config"]["API_KEY"] = api_key
- # self.SOP["config"]["PROXY"] = proxy
- target_tensor = get_embedding(target,api_key)
- sim_scores = cos_sim(target_tensor, self.embeddings)[0]
- top_k_score, top_k_idx = torch.topk(sim_scores,k = 1)
- if top_k_score > 0.7:
- index = top_k_idx
- else:
- index = 0
- target = get_cot_result(target)
- design_states = get_desgin_states(target,index)
- root = design_states[0]["state_name"]
- agents = get_agents(design_states)
- relations = get_relations(design_states)
- states = gen_states(design_states)
- for state_name,state_dict in states.items():
- state_dict["begin_role"] = list(agents.keys())[0]
- state_dict["begin_query"] = "Now that we are in the **{}**, I'm glad to offer you assistance.".format(state_name)
- self.SOP["root"] = root
- self.SOP["relations"] = relations
- self.SOP["agents"] = agents
- self.SOP["states"] = states
-        # Write the SOP dictionary to a JSON file
- print(self.SOP)
- file_name = 'generated_sop.json'
- with open(file_name, "w",encoding="utf-8") as json_file:
- json.dump(self.SOP, json_file ,indent=4,ensure_ascii=False)
- return file_name
-
- def load_sop_fn(self,sop):
- return sop.name
-
- def construct_ui(self):
- with gr.Blocks(css=gc.CSS) as demo:
- with gr.Tab(label="SOP generation") as tab1:
- self.SOP = {
- "config": {
- "API_KEY": "sk-********",
- "MAX_CHAT_HISTORY": "5",
- "User_Names": '["User"]',
- },
- "root": "state1",
- "relations": {
- "state1": {"0": "state1", "1": "state2"},
- "state2": {"0": "state2", "1": "end_state"},
- },
- "agents": None,
- "states": None,
- }
- gr.Markdown("""# Generate Agent""")
- with gr.Row():
- self.api_key_sop_generation = gr.Textbox(label="api_key")
- self.proxy_sop_generation = gr.Textbox(label="proxy",visible=False)
- with gr.Row():
- self.requirement_sop_generation = gr.Textbox(value ="a shopping assistant help customer to buy the commodity",label="requirement")
- with gr.Row():
- self.generated_sop = gr.File(label="generated_file")
- self.generate_button = gr.Button(label="Generate")
- self.generate_button.click(fn = self.generate_sop,inputs=[self.api_key_sop_generation,self.proxy_sop_generation,self.requirement_sop_generation],outputs=[self.generated_sop])
- with gr.Tab(label="Chat") as tab2:
- uploaded_sop = gr.State()
- with gr.Row():
-                    sop = gr.File(label="upload your customized SOP")
- load_sop_btn = gr.Button(value="Load SOP")
- load_sop_btn.click(self.load_sop_fn, sop,uploaded_sop)
- with gr.Column():
- self.radio_mode = gr.Radio(
- [Client.SINGLE_MODE],
- label = Client.MODE_LABEL,
- info = Client.MODE_INFO,
- value= Client.SINGLE_MODE,
- interactive=True
- # label="Select the execution mode",
- # info="Single mode refers to when the current agent output ends, it will stop running until you click to continue. Auto mode refers to when you complete the input, all agents will continue to output until the task ends."
- )
- self.text_api = gr.Textbox(
- value = self.cache["api_key"],
- placeholder="openai key",
- label="Please input valid openai key for gpt-3.5-turbo-16k."
- )
- self.btn_start = gr.Button(
- value="Start😁(Click here to start!)",
- )
- self.chatbot = gr.Chatbot(
- elem_id="chatbot1",
- label="Dialog",
- visible=False,
- height=700
- )
- self.btn_next = gr.Button(
- value="Next Agent Start",
- visible=False
- )
- with gr.Row():
- self.text_input = gr.Textbox(
- placeholder="Please enter your content.",
- label="Input",
- scale=9,
- visible=False
- )
- self.btn_send = gr.Button(
- value="Send",
- visible=False
- )
- self.btn_reset = gr.Button(
- value="Restart",
- visible=False
- )
-
- all_components = [self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
-
- self.btn_start.click(
- fn = self.btn_start_when_click,
- inputs=[self.radio_mode, self.text_api,uploaded_sop],
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next, self.radio_mode, self.text_api]
- ).then(
- fn = self.btn_start_after_click,
- inputs=[self.chatbot],
- outputs=all_components
- )
-
- self.btn_send.click(
- fn=self.btn_send_when_click,
- inputs=[self.text_input, self.chatbot],
- outputs=all_components
- ).then(
- fn=self.btn_send_after_click,
- inputs=[self.text_input, self.chatbot],
- outputs=all_components
- )
-
- self.text_input.submit(
- fn=self.btn_send_when_click,
- inputs=[self.text_input, self.chatbot],
- outputs=all_components
- ).then(
- fn=self.btn_send_after_click,
- inputs=[self.text_input, self.chatbot],
- outputs=all_components
- )
-
- self.btn_reset.click(
- fn=self.btn_reset_when_click,
- inputs=[],
- outputs=all_components
- ).then(
- fn=self.btn_reset_after_click,
- inputs=[],
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next, self.radio_mode, self.text_api]
- )
-
- self.btn_next.click(
- fn=self.btn_next_when_click,
- inputs=[self.chatbot],
- outputs=all_components
- ).then(
- fn=self.btn_next_after_click,
- inputs=[self.chatbot],
- outputs=all_components
- )
-
- self.demo = demo
-
- def btn_start_when_click(self, mode, api,sop):
- """
- inputs=[mode, api]
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next, self.radio_mode]
- """
- print("server: send ", mode, api)
- self.send_start_cmd({"mode": mode, "api_key":api,"uploaded_sop": sop})
- agents,roles_to_names,names_to_roles = Agent.from_config(str(sop))
- agents_name = []
- for i in names_to_roles :
- for j in names_to_roles[i]:
- agents_name.append(j+"("+names_to_roles[i][j]+")")
- self.new_render_and_register_ui(agents_name)
- return gr.Button.update(visible=False), \
- gr.Button.update(visible=False),\
- gr.Button.update(visible=False),\
- gr.Chatbot.update(visible=True),\
- gr.Textbox.update(visible=False),\
- gr.Button.update(visible=False),\
- gr.Radio.update(visible=False),\
- gr.Textbox.update(visible=False)
-
- def new_render_and_register_ui(self,agent_names):
- gc.add_agent(agent_names, 0)
-
- def btn_start_after_click(self, history):
- """
- inputs=[self.chatbot]
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
- """
- if self.data_history is None:
- self.data_history = list()
- receive_server = self.receive_server
- while True:
- data_list: List = receive_server.send(None)
- for item in data_list:
- data = eval(item)
- assert isinstance(data, list)
- state, agent_name, token, node_name = data
- self.current_node_name = node_name
- assert isinstance(state, int)
- assert state in [10, 11, 12, 30, 99, 98]
- if state == 99:
- # finish
- yield gr.Button.update(visible=False),\
- gr.Button.update(visible=True, interactive=False),\
- gr.Button.update(visible=True, interactive=True),\
- history,\
- gr.Textbox.update(visible=True, interactive=False),\
- gr.Button.update(visible=False)
- return
- elif state == 98:
- # single mode
- yield gr.Button.update(visible=False), \
- gr.Button.update(visible=False),\
- gr.Button.update(visible=True),\
- history,\
- gr.Textbox.update(visible=False),\
- gr.Button.update(visible=True, value=f"Next Agent: 🤖{agent_name} | Next Node: ⭕{node_name}")
- return
- elif state == 30:
- # user input
- yield gr.Button.update(visible=False), \
- gr.Button.update(visible=True),\
- gr.Button.update(visible=True),\
- history,\
- gr.Textbox.update(visible=True, value=""),\
- gr.Button.update(visible=False)
- return
- history = self.handle_message(history, state, agent_name, token, node_name)
- yield gr.Button.update(visible=False), \
- gr.Button.update(visible=False),\
- gr.Button.update(visible=False),\
- history,\
- gr.Textbox.update(visible=False),\
- gr.Button.update(visible=False)
-
- def btn_send_when_click(self, text_input, history):
- '''
- inputs=[self.text_input, self.chatbot]
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
- '''
- history = self.handle_message(history, 10, 'User', text_input, self.current_node_name)
- self.send_message(""+text_input+self.SIGN["SPLIT"])
- yield gr.Button.update(visible=False), \
- gr.Button.update(visible=False),\
- gr.Button.update(visible=False),\
- history,\
- gr.Textbox.update(visible=False),\
- gr.Button.update(visible=False)
- return
-
- def btn_send_after_click(self, text_input, history):
- '''
- inputs=[self.text_input, self.chatbot]
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
- '''
- yield from self.btn_start_after_click(history=history)
- return
-
- def btn_reset_when_click(self):
- """
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
- """
- return gr.Button.update(interactive=False), gr.Button.update(interactive=False), gr.Button.update(interactive=False, value="Restarting....."), gr.Chatbot.update(label="Dialog"), \
- gr.Textbox.update(interactive=False), gr.Button.update(visible=False)
-
- def btn_reset_after_click(self):
- """
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next, self.radio_mode]
- """
- self.reset()
- self.first_recieve_from_client(reset_mode=True)
- self.current_node_name = ""
- self.data_history = None
- return gr.Button.update(interactive=True, visible=True), \
- gr.Button.update(interactive=True, visible=False), \
- gr.Button.update(interactive=True, value="Restart", visible=False), \
- gr.Chatbot.update(label="Dialog", visible=False, value=None), \
- gr.Textbox.update(interactive=True, visible=False),\
- gr.Button.update(visible=False),\
- gr.Radio.update(visible=True), \
- gr.Textbox.update(visible=True)
-
- def btn_next_when_click(self, history):
- """
- outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
- """
- yield gr.Button.update(visible=False), \
- gr.Button.update(visible=False),\
- gr.Button.update(visible=False),\
- history,\
- gr.Textbox.update(visible=False),\
- gr.Button.update(visible=False)
- self.send_message("nothing")
- return
-
- def btn_next_after_click(self, history):
- time.sleep(1)
- yield from self.btn_start_after_click(history=history)
- return
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='A demo of chatbot')
- parser.add_argument('--agent', type=str, help='path to SOP json')
- args = parser.parse_args()
-
- ui = GeneralUI(client_cmd=["python","gradio_backend.py"])
- ui.construct_ui()
- ui.run()
diff --git a/spaces/Aaaaaaaabdualh/meter2poem-1/README.md b/spaces/Aaaaaaaabdualh/meter2poem-1/README.md
deleted file mode 100644
index 73919f4d04ec69c05fb501aea1895c34ce271000..0000000000000000000000000000000000000000
--- a/spaces/Aaaaaaaabdualh/meter2poem-1/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Meter2poem 1
-emoji: 🐨
-colorFrom: gray
-colorTo: red
-sdk: gradio
-sdk_version: 3.2
-app_file: app.py
-pinned: false
-license: afl-3.0
-duplicated_from: mareloraby/meter2poem-1
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Abhaykoul/Merriam-webster_clone/README.md b/spaces/Abhaykoul/Merriam-webster_clone/README.md
deleted file mode 100644
index 53c8fc05de086c11d80f55fd11743c32f5303ce7..0000000000000000000000000000000000000000
--- a/spaces/Abhaykoul/Merriam-webster_clone/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Merriam-webster Clone
-emoji: ⚡
-colorFrom: green
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.28.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/Opchatgpts.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/Opchatgpts.py
deleted file mode 100644
index ab0d68c903dbe4133d103c5e49cb6b3cd0852a7e..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/Opchatgpts.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from __future__ import annotations
-
-from .ChatgptLogin import ChatgptLogin
-
-
-class Opchatgpts(ChatgptLogin):
- url = "https://opchatgpts.net"
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateOverlapSizer.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateOverlapSizer.js
deleted file mode 100644
index 57606d4f7d97b578511e0c728f64b022c112fb75..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateOverlapSizer.js
+++ /dev/null
@@ -1,8 +0,0 @@
-import CreateAnySizer from './utils/CreateAnySizer.js';
-import OverlapSizer from '../../overlapsizer/OverlapSizer.js';
-
-var CreateOverlapSizer = function (scene, data, view, styles, customBuilders) {
- return CreateAnySizer(scene, data, view, styles, customBuilders, OverlapSizer);
-}
-
-export default CreateOverlapSizer;
\ No newline at end of file
diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/vits/README.md b/spaces/Akmyradov/TurkmenTTSweSTT/vits/README.md
deleted file mode 100644
index f7883f8c5badbece0887d48e41436a32e64c5935..0000000000000000000000000000000000000000
--- a/spaces/Akmyradov/TurkmenTTSweSTT/vits/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech
-
-### Jaehyeon Kim, Jungil Kong, and Juhee Son
-
-In our recent [paper](https://arxiv.org/abs/2106.06103), we propose VITS: Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech.
-
-Several recent end-to-end text-to-speech (TTS) models enabling single-stage training and parallel sampling have been proposed, but their sample quality does not match that of two-stage TTS systems. In this work, we present a parallel end-to-end TTS method that generates more natural sounding audio than current two-stage models. Our method adopts variational inference augmented with normalizing flows and an adversarial training process, which improves the expressive power of generative modeling. We also propose a stochastic duration predictor to synthesize speech with diverse rhythms from input text. With the uncertainty modeling over latent variables and the stochastic duration predictor, our method expresses the natural one-to-many relationship in which a text input can be spoken in multiple ways with different pitches and rhythms. A subjective human evaluation (mean opinion score, or MOS) on the LJ Speech, a single speaker dataset, shows that our method outperforms the best publicly available TTS systems and achieves a MOS comparable to ground truth.
-
-Visit our [demo](https://jaywalnut310.github.io/vits-demo/index.html) for audio samples.
-
-We also provide the [pretrained models](https://drive.google.com/drive/folders/1ksarh-cJf3F5eKJjLVWY0X1j1qsQqiS2?usp=sharing).
-
-** Update note: Thanks to [Rishikesh (ऋषिकेश)](https://github.com/jaywalnut310/vits/issues/1), our interactive TTS demo is now available on [Colab Notebook](https://colab.research.google.com/drive/1CO61pZizDj7en71NQG_aqqKdGaA_SaBf?usp=sharing).
-
-
-*Figure: VITS at training (left) and VITS at inference (right).*
-
-## Pre-requisites
-0. Python >= 3.6
-0. Clone this repository
-0. Install python requirements. Please refer to [requirements.txt](requirements.txt)
- 1. You may need to install espeak first: `apt-get install espeak`
-0. Download datasets
- 1. Download and extract the LJ Speech dataset, then rename or create a link to the dataset folder: `ln -s /path/to/LJSpeech-1.1/wavs DUMMY1`
-    1. For the multi-speaker setting, download and extract the VCTK dataset, and downsample the wav files to 22050 Hz. Then rename or create a link to the dataset folder: `ln -s /path/to/VCTK-Corpus/downsampled_wavs DUMMY2`
-0. Build Monotonic Alignment Search and run preprocessing if you use your own datasets.
-```sh
-# Cython-version Monotonic Alignment Search
-cd monotonic_align
-python setup.py build_ext --inplace
-
-# Preprocessing (g2p) for your own datasets. Preprocessed phonemes for LJ Speech and VCTK have been already provided.
-# python preprocess.py --text_index 1 --filelists filelists/ljs_audio_text_train_filelist.txt filelists/ljs_audio_text_val_filelist.txt filelists/ljs_audio_text_test_filelist.txt
-# python preprocess.py --text_index 2 --filelists filelists/vctk_audio_sid_text_train_filelist.txt filelists/vctk_audio_sid_text_val_filelist.txt filelists/vctk_audio_sid_text_test_filelist.txt
-```
-
-
-## Training Example
-```sh
-# LJ Speech
-python train.py -c configs/ljs_base.json -m ljs_base
-
-# VCTK
-python train_ms.py -c configs/vctk_base.json -m vctk_base
-```
-
-
-## Inference Example
-See [inference.ipynb](inference.ipynb)
diff --git a/spaces/AmirTrader/LinearRegression/Dockerfile b/spaces/AmirTrader/LinearRegression/Dockerfile
deleted file mode 100644
index c33a0787f9bfc4eb7088822ae9e724bad601c068..0000000000000000000000000000000000000000
--- a/spaces/AmirTrader/LinearRegression/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-FROM python:3.9
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-RUN python3 -m pip install --no-cache-dir --upgrade pip
-RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-COPY . .
-
-CMD ["panel", "serve", "/code/app.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "*"]
-
-RUN mkdir /.cache
-RUN chmod 777 /.cache
-RUN mkdir .chroma
-RUN chmod 777 .chroma
\ No newline at end of file
diff --git a/spaces/Amitontheweb/InstaoffyzFreeParaphraser/app.py b/spaces/Amitontheweb/InstaoffyzFreeParaphraser/app.py
deleted file mode 100644
index abf82eeada38fe1fa9dd5faea2cde0002e4356bf..0000000000000000000000000000000000000000
--- a/spaces/Amitontheweb/InstaoffyzFreeParaphraser/app.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#---------------------AI Paraphraser - iFrame code --------------
-# With direct model load
-#----------------------------------------------------------------
-
-
-import transformers
-import gradio as gr
-import torch
-
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-
-tokenizer = AutoTokenizer.from_pretrained("humarin/chatgpt_paraphraser_on_T5_base")
-model = AutoModelForSeq2SeqLM.from_pretrained("humarin/chatgpt_paraphraser_on_T5_base")
-
-def paraphrase(
- Content_to_Rephrase,
- num_beams=5,
- num_beam_groups=5,
- num_return_sequences=5,
- repetition_penalty=10.0,
- diversity_penalty=3.0,
- no_repeat_ngram_size=2,
- temperature=0.7,
- max_length=5000
-):
- input_ids = tokenizer(
- f'paraphrase: {Content_to_Rephrase}',
- return_tensors="pt", padding="longest",
- max_length=max_length,
- truncation=True,
- ).input_ids
-
- outputs = model.generate(
- input_ids, temperature=temperature, repetition_penalty=repetition_penalty,
- num_return_sequences=num_return_sequences, no_repeat_ngram_size=no_repeat_ngram_size,
- num_beams=num_beams, num_beam_groups=num_beam_groups,
- max_length=max_length, diversity_penalty=diversity_penalty
- )
-
- res = tokenizer.batch_decode(outputs, skip_special_tokens=True)
-    # Keep the first three of the returned sequences; the interface exposes three outputs.
-    res1 = res[0]
-    res2 = res[1]
-    res3 = res[2]
-
-    return res1, res2, res3
-
-output1 = gr.Textbox(label="Rephrased: Option 1")
-output2 = gr.Textbox(label="Rephrased: Option 2")
-output3 = gr.Textbox(label="Rephrased: Option 3")
-
-iface = gr.Interface(fn=paraphrase,
- inputs=["text"],
- outputs=[output1, output2, output3],
- title="Free AI Sentence Rephraser",
-                     description="Paste text in the input box and press 'Submit'. "
-                                 "Max length: ~35 words (larger content is summarized). "
-                                 "The rephrased sentences *may not* be better than the original input. "
-                                 "Model 'humarin' pre-trained by ChatGPT. Temp = 0.7",
- examples=[
- ["With the humble is wisdom."],
- ["Hatred stirs up strife."],
- ["The way of a fool is right in his own eyes."],
- ["Righteousness leads to life."],
- ],
- cache_examples=True,
- )
-
-iface.launch()
\ No newline at end of file
diff --git a/spaces/Amon1/ChatGPTForAcadamic/Dockerfile b/spaces/Amon1/ChatGPTForAcadamic/Dockerfile
deleted file mode 100644
index 564392c933342f77731be47faa417bb8906067bc..0000000000000000000000000000000000000000
--- a/spaces/Amon1/ChatGPTForAcadamic/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM python:3.11
-
-RUN echo '[global]' > /etc/pip.conf && \
- echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
- echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
-
-RUN pip3 install gradio requests[socks] mdtex2html
-
-COPY . /gpt
-WORKDIR /gpt
-
-
-CMD ["python3", "main.py"]
\ No newline at end of file
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/utils/models_utils.py b/spaces/Amrrs/DragGan-Inversion/PTI/utils/models_utils.py
deleted file mode 100644
index 836151dcc405d62fa435a3cc3b3a0bd3472eeb03..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/PTI/utils/models_utils.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import pickle
-import functools
-import torch
-from PTI.configs import paths_config, global_config
-
-
-def toogle_grad(model, flag=True):
- for p in model.parameters():
- p.requires_grad = flag
-
-
-def load_tuned_G(run_id, type):
- new_G_path = f'{paths_config.checkpoints_dir}/model_{run_id}_{type}.pt'
- with open(new_G_path, 'rb') as f:
- new_G = torch.load(f).to(global_config.device).eval()
- new_G = new_G.float()
- toogle_grad(new_G, False)
- return new_G
-
-
-def load_old_G():
- with open(paths_config.stylegan2_ada_ffhq, 'rb') as f:
- old_G = pickle.load(f)['G_ema'].to(global_config.device).eval()
- old_G = old_G.float()
- return old_G
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/centripetalnet/README.md b/spaces/Andy1621/uniformer_image_detection/configs/centripetalnet/README.md
deleted file mode 100644
index 18631da0ac205c7a364dadc61903e1eb8acb2d6c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/centripetalnet/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# CentripetalNet
-
-## Introduction
-
-[ALGORITHM]
-
-```latex
-@InProceedings{Dong_2020_CVPR,
-author = {Dong, Zhiwei and Li, Guoxuan and Liao, Yue and Wang, Fei and Ren, Pengju and Qian, Chen},
-title = {CentripetalNet: Pursuing High-Quality Keypoint Pairs for Object Detection},
-booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-month = {June},
-year = {2020}
-}
-```
-
-## Results and models
-
-| Backbone | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download |
-| :-------------: | :--------: |:----------------: | :------: | :------------: | :----: | :------: | :--------: |
-| HourglassNet-104 | [16 x 6](./centripetalnet_hourglass104_mstest_16x6_210e_coco.py) | 190/210 | 16.7 | 3.7 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804.log.json) |
-
-Note:
-
-- TTA setting is single-scale and `flip=True`.
-- The model we released is the best checkpoint rather than the latest checkpoint (box AP 44.8 vs 44.6 in our experiment).
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_scoring_roi_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_scoring_roi_head.py
deleted file mode 100644
index c6e55c7752209cb5c15eab689ad9e8ac1fef1b66..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_scoring_roi_head.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import torch
-
-from mmdet.core import bbox2roi
-from ..builder import HEADS, build_head
-from .standard_roi_head import StandardRoIHead
-
-
-@HEADS.register_module()
-class MaskScoringRoIHead(StandardRoIHead):
- """Mask Scoring RoIHead for Mask Scoring RCNN.
-
- https://arxiv.org/abs/1903.00241
- """
-
- def __init__(self, mask_iou_head, **kwargs):
- assert mask_iou_head is not None
- super(MaskScoringRoIHead, self).__init__(**kwargs)
- self.mask_iou_head = build_head(mask_iou_head)
-
- def init_weights(self, pretrained):
- """Initialize the weights in head.
-
- Args:
- pretrained (str, optional): Path to pre-trained weights.
- Defaults to None.
- """
- super(MaskScoringRoIHead, self).init_weights(pretrained)
- self.mask_iou_head.init_weights()
-
- def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
- img_metas):
- """Run forward function and calculate loss for Mask head in
- training."""
- pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
- mask_results = super(MaskScoringRoIHead,
- self)._mask_forward_train(x, sampling_results,
- bbox_feats, gt_masks,
- img_metas)
- if mask_results['loss_mask'] is None:
- return mask_results
-
- # mask iou head forward and loss
- pos_mask_pred = mask_results['mask_pred'][
- range(mask_results['mask_pred'].size(0)), pos_labels]
- mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],
- pos_mask_pred)
- pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
- pos_labels]
-
- mask_iou_targets = self.mask_iou_head.get_targets(
- sampling_results, gt_masks, pos_mask_pred,
- mask_results['mask_targets'], self.train_cfg)
- loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,
- mask_iou_targets)
- mask_results['loss_mask'].update(loss_mask_iou)
- return mask_results
-
- def simple_test_mask(self,
- x,
- img_metas,
- det_bboxes,
- det_labels,
- rescale=False):
- """Obtain mask prediction without augmentation."""
- # image shapes of images in the batch
- ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
- scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
-
- num_imgs = len(det_bboxes)
- if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
- num_classes = self.mask_head.num_classes
- segm_results = [[[] for _ in range(num_classes)]
- for _ in range(num_imgs)]
- mask_scores = [[[] for _ in range(num_classes)]
- for _ in range(num_imgs)]
- else:
- # if det_bboxes is rescaled to the original image size, we need to
- # rescale it back to the testing scale to obtain RoIs.
- if rescale and not isinstance(scale_factors[0], float):
- scale_factors = [
- torch.from_numpy(scale_factor).to(det_bboxes[0].device)
- for scale_factor in scale_factors
- ]
- _bboxes = [
- det_bboxes[i][:, :4] *
- scale_factors[i] if rescale else det_bboxes[i]
- for i in range(num_imgs)
- ]
- mask_rois = bbox2roi(_bboxes)
- mask_results = self._mask_forward(x, mask_rois)
- concat_det_labels = torch.cat(det_labels)
- # get mask scores with mask iou head
- mask_feats = mask_results['mask_feats']
- mask_pred = mask_results['mask_pred']
- mask_iou_pred = self.mask_iou_head(
- mask_feats, mask_pred[range(concat_det_labels.size(0)),
- concat_det_labels])
- # split batch mask prediction back to each image
- num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)
- mask_preds = mask_pred.split(num_bboxes_per_img, 0)
- mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)
-
- # apply mask post-processing to each image individually
- segm_results = []
- mask_scores = []
- for i in range(num_imgs):
- if det_bboxes[i].shape[0] == 0:
- segm_results.append(
- [[] for _ in range(self.mask_head.num_classes)])
- mask_scores.append(
- [[] for _ in range(self.mask_head.num_classes)])
- else:
- segm_result = self.mask_head.get_seg_masks(
- mask_preds[i], _bboxes[i], det_labels[i],
- self.test_cfg, ori_shapes[i], scale_factors[i],
- rescale)
- # get mask scores with mask iou head
- mask_score = self.mask_iou_head.get_mask_scores(
- mask_iou_preds[i], det_bboxes[i], det_labels[i])
- segm_results.append(segm_result)
- mask_scores.append(mask_score)
- return list(zip(segm_results, mask_scores))
diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/metrics_accumulator.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/metrics_accumulator.py
deleted file mode 100644
index e38f0d014afd70475ab6051c76ad3ee75493d575..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/metrics_accumulator.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from collections import defaultdict
-
-import numpy as np
-
-
-class MetricsAccumulator:
- def __init__(self) -> None:
- self.accumulator = defaultdict(lambda: [])
-
- def update_metric(self, metric_name, metric_value):
- self.accumulator[metric_name].append(metric_value)
-
- def print_average_metric(self):
- for k, v in self.accumulator.items():
- average_v = np.array(v).mean()
- print(f"{k} - {average_v:.2f}")
-
- self.__init__()
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/drive.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/drive.py
deleted file mode 100644
index 06e8ff606e0d2a4514ec8b7d2c6c436a32efcbf4..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/drive.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'DRIVEDataset'
-data_root = 'data/DRIVE'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-img_scale = (584, 565)
-crop_size = (64, 64)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type='RepeatDataset',
- times=40000,
- dataset=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py
deleted file mode 100644
index 30b1a3d6580cf0360710426fbea1f05acdf07b4b..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch.nn as nn
-
-from .registry import ACTIVATION_LAYERS
-
-
-@ACTIVATION_LAYERS.register_module()
-class HSigmoid(nn.Module):
- """Hard Sigmoid Module. Apply the hard sigmoid function:
- Hsigmoid(x) = min(max((x + bias) / divisor, min_value), max_value)
- Default: Hsigmoid(x) = min(max((x + 1) / 2, 0), 1)
-
- Args:
- bias (float): Bias of the input feature map. Default: 1.0.
- divisor (float): Divisor of the input feature map. Default: 2.0.
- min_value (float): Lower bound value. Default: 0.0.
- max_value (float): Upper bound value. Default: 1.0.
-
- Returns:
- Tensor: The output tensor.
- """
-
- def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
- super(HSigmoid, self).__init__()
- self.bias = bias
- self.divisor = divisor
- assert self.divisor != 0
- self.min_value = min_value
- self.max_value = max_value
-
- def forward(self, x):
- x = (x + self.bias) / self.divisor
-
- return x.clamp_(self.min_value, self.max_value)
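As a quick sanity check on the formula in the docstring, here is a minimal usage sketch; the import path is assumed from this file's location and may differ in your checkout. With the default arguments the output follows directly from Hsigmoid(x) = min(max((x + 1) / 2, 0), 1).

```python
import torch
# Assumed import path, based on where this file lives in the repo.
from annotator.uniformer.mmcv.cnn.bricks.hsigmoid import HSigmoid

act = HSigmoid()  # defaults: bias=1.0, divisor=2.0, clamp to [0, 1]
x = torch.tensor([-3.0, -1.0, 0.0, 1.0, 3.0])
print(act(x))     # tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000])
```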
diff --git a/spaces/Arcader7171/positive/app.py b/spaces/Arcader7171/positive/app.py
deleted file mode 100644
index 0454a65e09e0bfb8b93a2f7e9153b69662a900b7..0000000000000000000000000000000000000000
--- a/spaces/Arcader7171/positive/app.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import gradio as gr
-import random
-
-def sentences():
- return random.choice(["'Work may be important, but make time to have some fun' -Me", "'Stay positive. Better days are on their way' -Unknown", "'Life is like a bicycle. To keep your balance, you must keep moving' -Albert Einstein"])
-
-with gr.Blocks() as pos:
- txt = gr.Textbox(value="", label="Textbox")
- btn = gr.Button(value="Free Inspirational Quotes")
- btn.click(sentences, outputs=[txt])
-
-
-pos.launch()
\ No newline at end of file
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/ml_nms.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/ml_nms.py
deleted file mode 100644
index 325d709a98422d8a355fc7c7e281179642850968..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/ml_nms.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from detectron2.layers import batched_nms
-
-
-def ml_nms(boxlist, nms_thresh, max_proposals=-1,
- score_field="scores", label_field="labels"):
- """
- Performs non-maximum suppression on a boxlist, with scores specified
- in a boxlist field via score_field.
- Arguments:
- boxlist(BoxList)
- nms_thresh (float)
- max_proposals (int): if > 0, then only the top max_proposals are kept
- after non-maximum suppression
- score_field (str)
- """
- if nms_thresh <= 0:
- return boxlist
- if boxlist.has('pred_boxes'):
- boxes = boxlist.pred_boxes.tensor
- labels = boxlist.pred_classes
- else:
- boxes = boxlist.proposal_boxes.tensor
- labels = boxlist.proposal_boxes.tensor.new_zeros(
- len(boxlist.proposal_boxes.tensor))
- scores = boxlist.scores
-
- keep = batched_nms(boxes, scores, labels, nms_thresh)
- if max_proposals > 0:
- keep = keep[: max_proposals]
- boxlist = boxlist[keep]
- return boxlist
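For context, here is a minimal usage sketch of ml_nms. It assumes the boxlist is a detectron2 Instances object, which is what the pred_boxes / scores / pred_classes fields suggest; the boxes, scores, and threshold below are made up purely for illustration.

```python
import torch
from detectron2.structures import Boxes, Instances

# Two heavily overlapping boxes of class 1 plus one separate box of class 2.
boxlist = Instances((480, 640))
boxlist.pred_boxes = Boxes(torch.tensor([
    [10.0, 10.0, 100.0, 100.0],
    [12.0, 12.0, 102.0, 102.0],
    [200.0, 200.0, 300.0, 300.0],
]))
boxlist.scores = torch.tensor([0.9, 0.8, 0.7])
boxlist.pred_classes = torch.tensor([1, 1, 2])

kept = ml_nms(boxlist, nms_thresh=0.5)
print(len(kept))  # 2: the lower-scoring duplicate of class 1 is suppressed
```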
diff --git a/spaces/BIASLab/sars-cov-2-classification-fcgr/src/models/resnet50_8mers.py b/spaces/BIASLab/sars-cov-2-classification-fcgr/src/models/resnet50_8mers.py
deleted file mode 100644
index fe8abb8f21612ec54ca13145025d6df82176f9c6..0000000000000000000000000000000000000000
--- a/spaces/BIASLab/sars-cov-2-classification-fcgr/src/models/resnet50_8mers.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# https://github.com/c1ph3rr/Deep-Residual-Learning-for-Image-Recognition/blob/master/Resnet50.py
-from pathlib import Path
-from tensorflow.keras.models import Model
-from tensorflow.keras.layers import (
- Input,
- Conv2D,
- Dense,
- MaxPool2D,
- GlobalAveragePooling2D,
- Add,
- Activation,
- BatchNormalization,
- ZeroPadding2D,
-)
-
-# Reference name of model
-MODEL_NAME = str(Path(__file__).resolve().stem)
-
-def identity_block(inp, filters, kernel_size, block, layer):
-
- f1, f2, f3 = filters
-
- conv_name = 'id_conv_b' + block + '_l' + layer
- batch_name = 'id_batch_b' + block + '_l' + layer
-
- x = Conv2D(filters=f1, kernel_size=1, padding='same', kernel_initializer='he_normal', name=conv_name + '_a')(inp)
- x = BatchNormalization(name=batch_name + '_a')(x)
- x = Activation('relu')(x)
-
- x = Conv2D(filters=f2, kernel_size=kernel_size, padding='same', kernel_initializer='he_normal', name=conv_name + '_b')(x)
- x = BatchNormalization(name=batch_name + '_b')(x)
- x = Activation('relu')(x)
-
- x = Conv2D(filters=f3, kernel_size=1, padding='same', kernel_initializer='he_normal', name=conv_name + '_c')(x)
- x = BatchNormalization(name=batch_name + '_c')(x)
-
- add = Add()([inp, x])
- x = Activation('relu')(add)
-
- return x
-
-
-def convolutional_block(inp, filters, kernel_size, block, layer, strides=2):
-
- f1, f2, f3 = filters
-
- conv_name = 'res_conv_b' + block + '_l' + layer
- batch_name = 'res_batch_b' + block + '_l' + layer
-
- y = Conv2D(filters=f1, kernel_size=1, padding='same', strides=strides, kernel_initializer='he_normal', name=conv_name + '_a')(inp)
- y = BatchNormalization(name=batch_name + '_a')(y)
- y = Activation('relu')(y)
-
- y = Conv2D(filters=f2, kernel_size=kernel_size, padding='same', kernel_initializer='he_normal', name=conv_name + '_b')(y)
- y = BatchNormalization(name=batch_name + '_b')(y)
- y = Activation('relu')(y)
-
- y = Conv2D(filters=f3, kernel_size=1, padding='same', kernel_initializer='he_normal', name=conv_name + '_c')(y)
- y = BatchNormalization(name=batch_name + '_c')(y)
-
- shortcut = Conv2D(filters=f3, kernel_size=1, strides=strides, kernel_initializer='he_normal', name=conv_name + '_shortcut')(inp)
- shortcut = BatchNormalization(name=batch_name + '_shortcut')(shortcut)
-
- add = Add()([shortcut, y])
- y = Activation('relu')(add)
-
- return y
-
-def get_model(n_outputs):
-
- inp = Input(shape=(256, 256, 1), name='input')
- padd = ZeroPadding2D(3)(inp)
-
- conv1 = Conv2D(64, 7, strides=2, padding='valid', name='conv1')(padd)
- conv1 = BatchNormalization(name='batch2')(conv1)
- conv1 = Activation('relu')(conv1)
- conv1 = ZeroPadding2D(1)(conv1)
- conv1 = MaxPool2D(3, 2)(conv1)
-
- conv2 = convolutional_block(conv1, [64,64,256], 3, '2', '1', strides=1)
- conv2 = identity_block(conv2, [64,64,256], 3, '2', '2')
- conv2 = identity_block(conv2, [64,64,256], 3, '2', '3')
-
- conv3 = convolutional_block(conv2, [128,128,512], 3, '3', '1')
- conv3 = identity_block(conv3, [128,128,512], 3, '3', '2')
- conv3 = identity_block(conv3, [128,128,512], 3, '3', '3')
- conv3 = identity_block(conv3, [128,128,512], 3, '3', '4')
-
- conv4 = convolutional_block(conv3, [256,256,1024], 3, '4', '1')
- conv4 = identity_block(conv4, [256,256,1024], 3, '4', '2')
- conv4 = identity_block(conv4, [256,256,1024], 3, '4', '3')
- conv4 = identity_block(conv4, [256,256,1024], 3, '4', '4')
- conv4 = identity_block(conv4, [256,256,1024], 3, '4', '5')
- conv4 = identity_block(conv4, [256,256,1024], 3, '4', '6')
-
- conv5 = convolutional_block(conv4, [512,512,2048], 3, '5', '1')
- conv5 = identity_block(conv5, [512,512,2048], 3, '5', '2')
- conv5 = identity_block(conv5, [512,512,2048], 3, '5', '3')
-
- avg_pool = GlobalAveragePooling2D()(conv5)
- out = Dense(n_outputs, activation='softmax')(avg_pool)
-
- return Model(inp, out)
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Bubble Shooter Classic.md b/spaces/Benson/text-generation/Examples/Bubble Shooter Classic.md
deleted file mode 100644
index 6f1db45ed1e1f44ecb3930839ae74b2ea518b8e9..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Bubble Shooter Classic.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-
Bubble Shooter Classic: A Timeless Game for Everyone
-
Do you love playing games that are simple yet addictive? Do you enjoy popping colorful bubbles and watching them burst? Do you want to challenge yourself and see how long you can last in a game that never ends? If you answered yes to any of these questions, you should try Bubble Shooter Classic, one of the most popular online games of all time!
Bubble Shooter Classic is a game inspired by classics like Puzzle Bobble. You have to shoot bubbles of different colors and make matches of three or more to pop them. The more bubbles you pop, the higher your score. But be careful: if you let the bubbles reach the bottom of the screen, you lose!
-
Bubble Shooter Classic has been around for many years, but it never gets old. It appeals to people of all ages and backgrounds, is easy to learn but hard to master, and can keep you entertained for hours on end. Best of all, it is free to play online or on your mobile devices!
-
In this article, we will tell you everything you need to know about Bubble Shooter Classic. We will explain how to play it, what the different game modes are, what the special bubbles are, and why you should play it. We will also give you some tips and tricks to master the game. By the end of this article, you will be a bubble-popping expert!
-
How to Play Bubble Shooter Classic
-
The main objective of Bubble Shooter Classic is to pop all the bubbles on the screen. To do this, use your mouse or finger to aim and shoot bubbles of the same color. When you make a match of three or more bubbles, they pop and disappear. The more bubbles you pop in a single shot, the more points you get.
-
-
-
Bubble Shooter Classic has four different game modes you can choose from. They are:
-
-
Classic: This is the original and most popular mode of the game. You have to clear all the bubbles on the screen to advance to the next level. There are hundreds of levels to play, each with a different layout and difficulty.
-
Arcade: This is a faster and more challenging mode. You have to pop as many bubbles as you can before they reach the bottom of the screen. The bubbles move down faster and faster as you progress. This mode never ends, so try to survive as long as you can.
-
Score Attack: In this mode you have to score as many points as possible in a limited time. A timer counts down from 60 seconds, and you have to pop as many bubbles as you can before time runs out. The more bubbles you pop in a single shot, the more bonus points you get.
-
Endless: In this mode you can play with no pressure and no time limit. Pop bubbles at your own pace and enjoy the game. This mode never ends, so you can play as long as you like.
-
-
Bubble Shooter Classic also has some special bubbles that can help or hinder you in the game. They are:
-
-
Color Bomb: This bubble has a star on it. When you pop it, it explodes and pops all the bubbles of the same color on the screen.
-
Rainbow Bubble: This bubble has a rainbow on it. When you shoot it, it changes its color to match the color of the bubble it hits.
-
Shape Bomb: This bubble has a shape on it. When you pop it, it explodes and pops all the bubbles that have the same shape on them.
-
-
-
Tips and Tricks to Master Bubble Shooter Classic
-
Bubble Shooter Classic may look like a simple game, but it can get quite tricky and challenging as you progress. Here are some tips and tricks that can help you master the game and improve your skills:
-
-
Aim for clusters of bubbles that have bubbles hanging below them: One of the best ways to clear the screen quickly and efficiently is to target clusters that have other bubbles hanging from them. When you pop these clusters, you also pop all the bubbles below them, creating a chain reaction and scoring more points.
-
Plan your moves and look for combo opportunities: Another way to boost your score and clear the screen faster is to plan your moves and look for chances to make combos. A combo is when you pop more than one cluster of bubbles with a single shot. To do this, look for gaps between the bubbles and shoot your bubble through them. That way you can hit several targets with one shot and create a bigger blast.
-
Use the walls to bounce your bubbles and reach tricky spots: Sometimes there is no direct match for your bubble on the screen. In that case, you can use the walls to bounce your bubble and reach difficult spots. Aim your bubble at an angle and shoot it toward the wall; it will bounce off and hit the bubble you want. This technique can help you reach bubbles that would otherwise be out of reach.
-
-
Don't let the bubbles reach the bottom of the screen: This is the most important tip of all. If the bubbles reach the bottom of the screen, you lose the game. To prevent this, pop bubbles as fast as you can and don't let them pile up. Also watch the warning line that shows how close the bubbles are to the bottom. If you see it, act quickly and clear some space.
-
-
Why You Should Play Bubble Shooter Classic
-
Bubble Shooter Classic is not just a game, it is an experience. It can offer you many benefits and reasons to play it. Here are some of them:
-
-
It is fun, addictive, and challenging for all ages: Bubble Shooter Classic can keep you hooked for hours on end. It can make you feel happy, excited, and satisfied. It challenges your skills, your strategy, and your reflexes, and it suits anyone, regardless of age or background.
-
It is free to play online or on your mobile devices: You can play Bubble Shooter Classic anytime, anywhere, and with anyone. You can play it online in your browser or on your mobile devices, and you don't have to pay anything to enjoy it. It is accessible and convenient for everyone.
-
It is a great way to relax and unwind after a long day: Bubble Shooter Classic can help you relieve some stress and tension after a long day. It can calm your mind and soothe your nerves, make you forget your worries and problems for a while, and give you some peace and quiet.
-
-
-
Conclusion
-
Bubble Shooter Classic is a game that deserves your attention and appreciation. It can provide you with hours of fun, entertainment, and challenge, teach you some valuable skills and lessons, and leave you happy and relaxed.
-
If you haven't tried Bubble Shooter Classic yet, what are you waiting for? You are missing out on one of the best games ever made! Don't hesitate and give it a try today! You won't regret it!
-
To play Bubble Shooter Classic online or download it to your devices, click here.
-
Frequently Asked Questions
-
Here are some frequently asked questions about Bubble Shooter Classic:
-
-
What is Bubble Shooter Classic?
-
Bubble Shooter Classic is a popular online game where you shoot bubbles of different colors and make matches of three or more to pop them. The more bubbles you pop, the higher your score.
-
How do I play Bubble Shooter Classic?
-
Use your mouse or finger to aim and shoot bubbles of the same color. When you make a match of three or more bubbles, they pop and disappear. The more bubbles you pop in a single shot, the more points you get.
-
What are the different game modes in Bubble Shooter Classic?
-
Bubble Shooter Classic has four different game modes: Classic, Arcade, Score Attack, and Endless. Each mode has its own rules and objectives.
-
What are the special bubbles in Bubble Shooter Classic?
-
Bubble Shooter Classic has some special bubbles that can help or hinder you in the game: the Color Bomb, the Rainbow Bubble, the Shape Bomb, and the Time Bomb. Each one has a different effect when you pop it.
-
-
Where can I play Bubble Shooter Classic?
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descarga De Descarga De 1 Apk.md b/spaces/Benson/text-generation/Examples/Descarga De Descarga De 1 Apk.md
deleted file mode 100644
index 1d11619570b04fc8f02fc0366c6783acca702184..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descarga De Descarga De 1 Apk.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
JioSaavn v3 30 1 APK Download: How to Enjoy Unlimited Music and Podcasts on Your Android Device
-
Do you love listening to music and podcasts on your Android device? Do you want access to a large, exclusive library of songs in different languages and genres? Do you want to set your favorite songs as your caller tunes for free? If you answered yes to any of these questions, then you should definitely check out JioSaavn v3 30 1 APK, the latest version of India's no. 1 free music app. In this article, we will tell you everything you need to know about JioSaavn, what JioSaavn v3 30 1 APK is, why you should download it, how to download and install it, and how to use it to enjoy unlimited music and podcasts on your Android device. Let's get started!
-
What is JioSaavn?
-
JioSaavn is a popular music streaming app that gives you access to more than 8 crore songs in 16 languages, including Hindi, English, Punjabi, Tamil, Telugu, Gujarati, Bengali, Marathi, Bhojpuri, Kannada, Malayalam, Odia and more. You can listen to songs from various genres, such as pop, rock, rap, EDM, classical, folk, devotional, remix, indie and more. You can also listen to songs by your favorite artists, such as Justin Bieber, Sid Sriram, Shreya Ghoshal, Jubin Nautiyal, Diljit Dosanjh, Ilaiyaraaja, Kumar Sanu, Michael Jackson, Alka Yagnik and many others.
But that's not all. JioSaavn also gives you access to India's best podcasts in different categories and languages. You can listen to comedy shows, film and TV shows, sports shows, thriller shows, crime shows, health and wellness shows, English podcasts, Hindi podcasts, Tamil podcasts and more. Some of the most popular podcasts on JioSaavn are On Purpose with Jay Shetty, Pyaar Actually, Woice with Warikoo Podcast, Get Sleepy: Sleep meditation and stories, and ZARA KHAUFF SE SUNO.
-
-
Features of JioSaavn
-
Here are some of the amazing features that make JioSaavn one of the best music apps in India:
-
-
Unlimited access to more than 8 crore songs in 16 languages
-
High-quality audio streaming at 320kbps
-
Offline listening mode to save data and listen without internet
-JioTunes feature to set songs as free caller tunes
-
Podcasts feature to listen to India's best audio shows
-
Playlists feature to create your own personalized music collection
-
Radio feature to listen to live and on-demand radio stations
-
Lyrics feature to sing along to your favorite songs
-
Equalizer feature to adjust the sound quality to your preference
-
Smart recommendations feature to discover new songs and podcasts based on your listening history
-
Trending charts to keep up with the latest hits and trends
-
Exclusive content feature to enjoy JioSaavn original shows and songs
-
Personalized home screen feature to access your favorite songs and podcasts easily
-Dark mode feature to reduce eye strain and save battery life
-
Share feature to share your favorite songs and podcasts with your friends on social media
-
-
Benefits of JioSaavn
-
JioSaavn is not just a music app, it is a lifestyle app that offers you many benefits. Here are some of the benefits you can enjoy with JioSaavn:
-
-
You can listen to unlimited music and podcasts for free, without ads or interruptions.
-
You can download songs and podcasts and listen to them offline, without using any data.
-
You can set songs as your caller tunes for free, without any charges or hassle.
-
You can listen to music and podcasts in high-quality audio, without compromising on sound quality.
-
-
You can discover new songs and podcasts, without getting bored or stuck in a rut.
-
You can personalize your listening experience, without limitations or restrictions.
-
You can enjoy exclusive content, without paying extra fees or subscriptions.
-
You can share your music and podcasts with your friends, without problems or delays.
-
-
What is JioSaavn v3 30 1 APK?
-
JioSaavn v3 30 1 APK is the latest version of the JioSaavn app that you can download and install on your Android device. It is an APK file (Android Package Kit), which contains all the files and code needed to run the app on your device. It is not available on the Google Play Store, but it can be downloaded from other sources online. JioSaavn v3 30 1 APK is compatible with Android devices running Android 4.4 or higher. It has a file size of about 25 MB and requires about 100 MB of free space on your device.
-
Why download JioSaavn v3 30 1 APK?
-
You may be wondering why you should download JioSaavn v3 30 1 APK when you can use the regular JioSaavn app from the Google Play Store. There are a few reasons why you might prefer the APK instead of the regular app. Here are some of them:
-
-
JioSaavn v3 30 1 APK offers some features that are not available in the regular app, such as unlimited downloads, ad-free listening, pro access and more.
-
JioSaavn v3 30 1 APK lets you enjoy all of JioSaavn's features without a Jio SIM card or a Jio account. You can use any SIM card or any account to access JioSaavn v3 30 1 APK.
-
JioSaavn v3 30 1 APK lets you bypass any geo-restrictions or network issues that might prevent you from accessing JioSaavn in some regions or countries. You can use JioSaavn v3 30 1 APK anywhere in the world, without any problems.
-
-
-
How to download and install JioSaavn v3 30 1 APK?
-
If you are interested in downloading and installing JioSaavn v3 30 1 APK on your Android device, follow these simple steps (a small command-line sketch of an alternative install route follows the list):
-
-
First, you need to allow the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and turn it on.
-
Next, you need to download the JioSaavn v3 30 1 APK file from a reliable and trustworthy source online. You can search for JioSaavn v3 30 1 APK on Google or any other search engine and find a suitable download link. Alternatively, you can use this link to download it directly:
-
After downloading the JioSaavn v3 30 1 APK file, locate it on your device and tap it to start the installation process. You may see a warning message asking you to confirm the installation; just tap Install and wait a few seconds.
-
Once the installation is complete, you can open the JioSaavn v3 30 1 APK app from your app drawer or home screen and enjoy unlimited music and podcasts on your Android device.
-
-
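The steps above are all done on the phone itself. As an alternative, readers who prefer to sideload from a computer can drive the same install with adb; a rough sketch, assuming adb is installed, USB debugging is enabled on the device, and the APK has already been downloaded to the current directory under the hypothetical file name JioSaavn_v3_30_1.apk:

```python
import subprocess

APK = "JioSaavn_v3_30_1.apk"  # hypothetical local file name for the downloaded APK

# List connected devices so you can confirm the phone is visible and authorized.
subprocess.run(["adb", "devices"], check=True)

# Install the APK; -r replaces/updates the app if an older version is already present.
subprocess.run(["adb", "install", "-r", APK], check=True)
```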
How to use JioSaavn v3 30 1 APK?
-
Now that you have downloaded and installed JioSaavn v3 30 1 APK on your Android device, you may be wondering how to use it to enjoy unlimited music and podcasts. Don't worry, we will guide you through the basic steps of using JioSaavn v3 30 1 APK. Here they are:
-
-
How to search for and play songs on JioSaavn v3 30 1 APK?
-
Searching for and playing songs on JioSaavn v3 30 1 APK is very easy and intuitive. You can follow these steps to do it:
-
-
Open the JioSaavn v3 30 1 APK app on your device and tap the search icon in the top-right corner of the screen.
-
Type the name of the song, artist, album, playlist or genre you want to listen to and press Enter.
-
-
You can also swipe left or right on the results to see more categories, such as Top Songs, Top Albums, Top Artists, Top Playlists, etc.
-
You can also use voice search to find songs by tapping the microphone icon next to the search icon and saying the name of the song, artist, album, playlist or genre you want to listen to.
-
-
How to download songs and listen offline on JioSaavn v3 30 1 APK?
-
Downloading songs and listening offline on JioSaavn v3 30 1 APK is a great way to save data and listen without internet. You can follow these steps to do it:
-
-
Find the song you want to download using the search feature or by browsing the categories.
-
Tap the More icon (three dots) next to the song and select Download from the menu.
-
The song will start downloading and you will see a progress bar indicating the download status.
-
Once the song has downloaded, you will see a check-mark icon next to it, indicating that it is available offline.
-
You can access your downloaded songs by tapping the menu icon (three horizontal lines) in the top-left corner of the screen and selecting Downloads from the menu.
-
You can also enable offline mode by tapping the menu icon and toggling on Offline Mode from the menu. This prevents any online streaming and plays only the downloaded songs.
-
-
How to set JioTunes on JioSaavn v3 30 1 APK?
-
Setting JioTunes on JioSaavn v3 30 1 APK is a fun and free way to personalize your caller tunes with your favorite songs. You can follow these steps to do it:
-
-
Find the song you want to set as your JioTune using the search feature or by browsing the categories.
-
Tap the More icon (three dots) next to the song and select Set as JioTune from the menu.
-
-
You will receive an SMS from Jio confirming that your JioTune has been activated successfully.
-
You can change your JioTune at any time by following the same steps with a different song.
-
You can also deactivate your JioTune at any time by sending an SMS with STOP to 56789 from your Jio number.
-
-
How to listen to podcasts on JioSaavn v3 30 1 APK?
-
Listening to podcasts on JioSaavn v3 30 1 APK is a great way to learn new things, entertain yourself, and keep up with the latest news and trends. You can follow these steps to do it:
-
-
Tap the menu icon (three horizontal lines) in the top-left corner of the screen and select Podcasts from the menu.
-
You will see a list of podcast categories, such as Comedy, Film & TV, Sports, Thriller, Crime, Health & Wellness, English, Hindi, Tamil, etc. You can tap any category to see the podcasts under it.
-
You can also use the search feature to find podcasts by name, topic or keyword.
-
Once you find a podcast you want to listen to, tap it to see its episodes and details.
-
You can tap any episode to play it, or tap the More icon (three dots) to see more options, such as Download, Share, Add to Queue, etc.
-
You can also subscribe to a podcast by tapping the Follow button in the top-right corner of the podcast page. This will notify you when new episodes are available and add them to your library.
-
You can access your subscribed podcasts by tapping the menu icon and selecting My Library from the menu. You will see a Podcasts tab where you can view all the podcasts and episodes you follow.
-
-
Conclusion
-
Summary of the article
-
-
Frequently asked questions
-
Here are some of the most frequently asked questions about JioSaavn v3 30 1 APK:
-
-
Is JioSaavn v3 30 1 APK safe and legal?
-Yes, JioSaavn v3 30 1 APK is safe and legal to use. It is a modified version of the original JioSaavn app that offers some additional features and benefits. However, you should always download it from a reliable and trustworthy source online and scan it with an antivirus before installing it on your device.
-
Do I need a Jio SIM card or a Jio account to use JioSaavn v3 30 1 APK?
-No, you do not need a Jio SIM card or a Jio account to use JioSaavn v3 30 1 APK. You can use any SIM card or any account to access it. However, if you have a Jio SIM card or a Jio account, you can enjoy some additional benefits, such as free data for streaming music and podcasts.
-
How do I update JioSaavn v3 30 1 APK?
-You can update JioSaavn v3 30 1 APK by downloading the latest version of the APK file from online sources and installing it on your device. You do not need to uninstall the previous version of the app before installing the new one. However, you should always back up your data and settings before updating any app.
-
How can I contact JioSaavn support?
-You can contact JioSaavn support by visiting their official website https://www.jiosaavn.com/help/ and filling out a contact form with your query or issue. You can also email them at feedback@jiosaavn.com or call them at +91-22-67737900.
-
How can I share my feedback or suggestions for JioSaavn v3 30 1 APK?
-You can share your feedback or suggestions for JioSaavn v3 30 1 APK by leaving a comment below this article or by contacting JioSaavn support through their website, email or phone number. Your feedback and suggestions are valued and appreciated.
-
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/validate.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/validate.py
deleted file mode 100644
index 4ba6744fe015f28672e54a856c99bbf6094157fe..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/validate.py
+++ /dev/null
@@ -1,384 +0,0 @@
-"""User input parameter validation.
-
-This module handles user input parameter validation
-against a provided input model.
-
-Note that the objects in this module do *not* mutate any
-arguments. No type conversion happens here. It is up to another
-layer to properly convert arguments to any required types.
-
-Validation Errors
------------------
-
-
-"""
-
-import decimal
-import json
-from datetime import datetime
-
-from botocore.exceptions import ParamValidationError
-from botocore.utils import is_json_value_header, parse_to_aware_datetime
-
-
-def validate_parameters(params, shape):
- """Validates input parameters against a schema.
-
- This is a convenience function that validates parameters against a schema.
- You can also instantiate and use the ParamValidator class directly if you
- want more control.
-
- If there are any validation errors then a ParamValidationError
-    will be raised. If there are no validation errors then no exception
- is raised and a value of None is returned.
-
- :param params: The user provided input parameters.
-
- :type shape: botocore.model.Shape
- :param shape: The schema which the input parameters should
- adhere to.
-
- :raise: ParamValidationError
-
- """
- validator = ParamValidator()
- report = validator.validate(params, shape)
- if report.has_errors():
- raise ParamValidationError(report=report.generate_report())
-
-
-def type_check(valid_types):
- def _create_type_check_guard(func):
- def _on_passes_type_check(self, param, shape, errors, name):
- if _type_check(param, errors, name):
- return func(self, param, shape, errors, name)
-
- def _type_check(param, errors, name):
- if not isinstance(param, valid_types):
- valid_type_names = [str(t) for t in valid_types]
- errors.report(
- name,
- 'invalid type',
- param=param,
- valid_types=valid_type_names,
- )
- return False
- return True
-
- return _on_passes_type_check
-
- return _create_type_check_guard
-
-
-def range_check(name, value, shape, error_type, errors):
- failed = False
- min_allowed = float('-inf')
- if 'min' in shape.metadata:
- min_allowed = shape.metadata['min']
- if value < min_allowed:
- failed = True
- elif hasattr(shape, 'serialization'):
- # Members that can be bound to the host have an implicit min of 1
- if shape.serialization.get('hostLabel'):
- min_allowed = 1
- if value < min_allowed:
- failed = True
- if failed:
- errors.report(name, error_type, param=value, min_allowed=min_allowed)
-
-
-class ValidationErrors:
- def __init__(self):
- self._errors = []
-
- def has_errors(self):
- if self._errors:
- return True
- return False
-
- def generate_report(self):
- error_messages = []
- for error in self._errors:
- error_messages.append(self._format_error(error))
- return '\n'.join(error_messages)
-
- def _format_error(self, error):
- error_type, name, additional = error
- name = self._get_name(name)
- if error_type == 'missing required field':
- return (
- f"Missing required parameter in {name}: "
- f"\"{additional['required_name']}\""
- )
- elif error_type == 'unknown field':
- unknown_param = additional['unknown_param']
- valid_names = ', '.join(additional['valid_names'])
- return (
- f'Unknown parameter in {name}: "{unknown_param}", '
- f'must be one of: {valid_names}'
- )
- elif error_type == 'invalid type':
- param = additional['param']
- param_type = type(param)
- valid_types = ', '.join(additional['valid_types'])
- return (
- f'Invalid type for parameter {name}, value: {param}, '
- f'type: {param_type}, valid types: {valid_types}'
- )
- elif error_type == 'invalid range':
- param = additional['param']
- min_allowed = additional['min_allowed']
- return (
- f'Invalid value for parameter {name}, value: {param}, '
- f'valid min value: {min_allowed}'
- )
- elif error_type == 'invalid length':
- param = additional['param']
- min_allowed = additional['min_allowed']
- return (
- f'Invalid length for parameter {name}, value: {param}, '
- f'valid min length: {min_allowed}'
- )
- elif error_type == 'unable to encode to json':
- return 'Invalid parameter {} must be json serializable: {}'.format(
- name,
- additional['type_error'],
- )
- elif error_type == 'invalid type for document':
- param = additional['param']
- param_type = type(param)
- valid_types = ', '.join(additional['valid_types'])
- return (
- f'Invalid type for document parameter {name}, value: {param}, '
- f'type: {param_type}, valid types: {valid_types}'
- )
- elif error_type == 'more than one input':
- members = ', '.join(additional['members'])
- return (
- f'Invalid number of parameters set for tagged union structure '
- f'{name}. Can only set one of the following keys: '
- f'{members}.'
- )
- elif error_type == 'empty input':
- members = ', '.join(additional['members'])
- return (
- f'Must set one of the following keys for tagged union'
- f'structure {name}: {members}.'
- )
-
- def _get_name(self, name):
- if not name:
- return 'input'
- elif name.startswith('.'):
- return name[1:]
- else:
- return name
-
- def report(self, name, reason, **kwargs):
- self._errors.append((reason, name, kwargs))
-
-
-class ParamValidator:
- """Validates parameters against a shape model."""
-
- def validate(self, params, shape):
- """Validate parameters against a shape model.
-
- This method will validate the parameters against a provided shape model.
- All errors will be collected before returning to the caller. This means
- that this method will not stop at the first error, it will return all
- possible errors.
-
- :param params: User provided dict of parameters
- :param shape: A shape model describing the expected input.
-
-        :return: A ValidationErrors instance containing any collected errors.
-
- """
- errors = ValidationErrors()
- self._validate(params, shape, errors, name='')
- return errors
-
- def _check_special_validation_cases(self, shape):
- if is_json_value_header(shape):
- return self._validate_jsonvalue_string
- if shape.type_name == 'structure' and shape.is_document_type:
- return self._validate_document
-
- def _validate(self, params, shape, errors, name):
- special_validator = self._check_special_validation_cases(shape)
- if special_validator:
- special_validator(params, shape, errors, name)
- else:
- getattr(self, '_validate_%s' % shape.type_name)(
- params, shape, errors, name
- )
-
- def _validate_jsonvalue_string(self, params, shape, errors, name):
- # Check to see if a value marked as a jsonvalue can be dumped to
- # a json string.
- try:
- json.dumps(params)
- except (ValueError, TypeError) as e:
- errors.report(name, 'unable to encode to json', type_error=e)
-
- def _validate_document(self, params, shape, errors, name):
- if params is None:
- return
-
- if isinstance(params, dict):
- for key in params:
- self._validate_document(params[key], shape, errors, key)
- elif isinstance(params, list):
- for index, entity in enumerate(params):
- self._validate_document(
- entity, shape, errors, '%s[%d]' % (name, index)
- )
-        elif not isinstance(params, (str, int, bool, float)):
- valid_types = (str, int, bool, float, list, dict)
- valid_type_names = [str(t) for t in valid_types]
- errors.report(
- name,
- 'invalid type for document',
- param=params,
- param_type=type(params),
- valid_types=valid_type_names,
- )
-
- @type_check(valid_types=(dict,))
- def _validate_structure(self, params, shape, errors, name):
- if shape.is_tagged_union:
- if len(params) == 0:
- errors.report(name, 'empty input', members=shape.members)
- elif len(params) > 1:
- errors.report(
- name, 'more than one input', members=shape.members
- )
-
- # Validate required fields.
- for required_member in shape.metadata.get('required', []):
- if required_member not in params:
- errors.report(
- name,
- 'missing required field',
- required_name=required_member,
- user_params=params,
- )
- members = shape.members
- known_params = []
- # Validate known params.
- for param in params:
- if param not in members:
- errors.report(
- name,
- 'unknown field',
- unknown_param=param,
- valid_names=list(members),
- )
- else:
- known_params.append(param)
- # Validate structure members.
- for param in known_params:
- self._validate(
- params[param],
- shape.members[param],
- errors,
- f'{name}.{param}',
- )
-
- @type_check(valid_types=(str,))
- def _validate_string(self, param, shape, errors, name):
-        # Validate range. For a string, the min/max constraints
- # are of the string length.
- # Looks like:
- # "WorkflowId":{
- # "type":"string",
- # "min":1,
- # "max":256
- # }
- range_check(name, len(param), shape, 'invalid length', errors)
-
- @type_check(valid_types=(list, tuple))
- def _validate_list(self, param, shape, errors, name):
- member_shape = shape.member
- range_check(name, len(param), shape, 'invalid length', errors)
- for i, item in enumerate(param):
- self._validate(item, member_shape, errors, f'{name}[{i}]')
-
- @type_check(valid_types=(dict,))
- def _validate_map(self, param, shape, errors, name):
- key_shape = shape.key
- value_shape = shape.value
- for key, value in param.items():
- self._validate(key, key_shape, errors, f"{name} (key: {key})")
- self._validate(value, value_shape, errors, f'{name}.{key}')
-
- @type_check(valid_types=(int,))
- def _validate_integer(self, param, shape, errors, name):
- range_check(name, param, shape, 'invalid range', errors)
-
- def _validate_blob(self, param, shape, errors, name):
- if isinstance(param, (bytes, bytearray, str)):
- return
- elif hasattr(param, 'read'):
- # File like objects are also allowed for blob types.
- return
- else:
- errors.report(
- name,
- 'invalid type',
- param=param,
- valid_types=[str(bytes), str(bytearray), 'file-like object'],
- )
-
- @type_check(valid_types=(bool,))
- def _validate_boolean(self, param, shape, errors, name):
- pass
-
- @type_check(valid_types=(float, decimal.Decimal) + (int,))
- def _validate_double(self, param, shape, errors, name):
- range_check(name, param, shape, 'invalid range', errors)
-
- _validate_float = _validate_double
-
- @type_check(valid_types=(int,))
- def _validate_long(self, param, shape, errors, name):
- range_check(name, param, shape, 'invalid range', errors)
-
- def _validate_timestamp(self, param, shape, errors, name):
- # We don't use @type_check because datetimes are a bit
- # more flexible. You can either provide a datetime
- # object, or a string that parses to a datetime.
- is_valid_type = self._type_check_datetime(param)
- if not is_valid_type:
- valid_type_names = [str(datetime), 'timestamp-string']
- errors.report(
- name, 'invalid type', param=param, valid_types=valid_type_names
- )
-
- def _type_check_datetime(self, value):
- try:
- parse_to_aware_datetime(value)
- return True
- except (TypeError, ValueError, AttributeError):
- # Yes, dateutil can sometimes raise an AttributeError
- # when parsing timestamps.
- return False
-
-
-class ParamValidationDecorator:
- def __init__(self, param_validator, serializer):
- self._param_validator = param_validator
- self._serializer = serializer
-
- def serialize_to_request(self, parameters, operation_model):
- input_shape = operation_model.input_shape
- if input_shape is not None:
- report = self._param_validator.validate(
- parameters, operation_model.input_shape
- )
- if report.has_errors():
- raise ParamValidationError(report=report.generate_report())
- return self._serializer.serialize_to_request(
- parameters, operation_model
- )
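To see how the validator above is typically driven, the short sketch below loads a service model that ships with botocore and validates a deliberately incomplete parameter dict; the service and operation names are just examples:

```python
import botocore.session
from botocore.validate import ParamValidator, validate_parameters

session = botocore.session.get_session()
input_shape = session.get_service_model('s3').operation_model('PutObject').input_shape

# Collect every problem instead of stopping at the first one.
report = ParamValidator().validate({'Bucket': 'my-bucket'}, input_shape)
if report.has_errors():
    print(report.generate_report())  # e.g. reports the missing required "Key" parameter

# The convenience wrapper does the same but raises ParamValidationError on failure:
# validate_parameters({'Bucket': 'my-bucket'}, input_shape)
```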
diff --git a/spaces/Binguii/Ballen/README.md b/spaces/Binguii/Ballen/README.md
deleted file mode 100644
index 55fa3cbab9143c550e983e8d0a27a7671ea2d5a0..0000000000000000000000000000000000000000
--- a/spaces/Binguii/Ballen/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Ballen
-emoji: 💻
-colorFrom: gray
-colorTo: blue
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/CForGETaass/vits-uma-genshin-honkai/mel_processing.py b/spaces/CForGETaass/vits-uma-genshin-honkai/mel_processing.py
deleted file mode 100644
index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000
--- a/spaces/CForGETaass/vits-uma-genshin-honkai/mel_processing.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
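To show how these helpers fit together, here is a minimal usage sketch that turns one second of dummy audio into a mel spectrogram; the parameter values are typical VITS-style settings rather than anything mandated by this file, and it assumes the module is importable as mel_processing:

```python
import torch
from mel_processing import mel_spectrogram_torch  # assumes this file is on the import path

# Batch of one mono waveform: one second at 22.05 kHz, values kept within [-1, 1].
y = torch.rand(1, 22050) * 2 - 1

mel = mel_spectrogram_torch(
    y, n_fft=1024, num_mels=80, sampling_rate=22050,
    hop_size=256, win_size=1024, fmin=0.0, fmax=None,
)
print(mel.shape)  # torch.Size([1, 80, 86]) with these settings
```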
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/PointRend/point_rend/roi_heads.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/PointRend/point_rend/roi_heads.py
deleted file mode 100644
index 6e7c3626f19f680b60617f599c4f11fecfaa8f3f..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/PointRend/point_rend/roi_heads.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import numpy as np
-import torch
-
-from detectron2.layers import ShapeSpec, cat, interpolate
-from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
-from detectron2.modeling.roi_heads.mask_head import (
- build_mask_head,
- mask_rcnn_inference,
- mask_rcnn_loss,
-)
-from detectron2.modeling.roi_heads.roi_heads import select_foreground_proposals
-
-from .point_features import (
- generate_regular_grid_point_coords,
- get_uncertain_point_coords_on_grid,
- get_uncertain_point_coords_with_randomness,
- point_sample,
- point_sample_fine_grained_features,
-)
-from .point_head import build_point_head, roi_mask_point_loss
-
-
-def calculate_uncertainty(logits, classes):
- """
-    We estimate uncertainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
- foreground class in `classes`.
-
- Args:
- logits (Tensor): A tensor of shape (R, C, ...) or (R, 1, ...) for class-specific or
- class-agnostic, where R is the total number of predicted masks in all images and C is
- the number of foreground classes. The values are logits.
-        classes (list): A list of length R that contains either predicted or ground truth class
-            for each predicted mask.
-
- Returns:
- scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
- the most uncertain locations having the highest uncertainty score.
- """
- if logits.shape[1] == 1:
- gt_class_logits = logits.clone()
- else:
- gt_class_logits = logits[
- torch.arange(logits.shape[0], device=logits.device), classes
- ].unsqueeze(1)
- return -torch.abs(gt_class_logits)
-
-
-@ROI_HEADS_REGISTRY.register()
-class PointRendROIHeads(StandardROIHeads):
- """
- The RoI heads class for PointRend instance segmentation models.
-
- In this class we redefine the mask head of `StandardROIHeads` leaving all other heads intact.
- To avoid namespace conflict with other heads we use names starting from `mask_` for all
- variables that correspond to the mask head in the class's namespace.
- """
-
- def _init_mask_head(self, cfg, input_shape):
- # fmt: off
- self.mask_on = cfg.MODEL.MASK_ON
- if not self.mask_on:
- return
- self.mask_coarse_in_features = cfg.MODEL.ROI_MASK_HEAD.IN_FEATURES
- self.mask_coarse_side_size = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
- self._feature_scales = {k: 1.0 / v.stride for k, v in input_shape.items()}
- # fmt: on
-
- in_channels = np.sum([input_shape[f].channels for f in self.mask_coarse_in_features])
- self.mask_coarse_head = build_mask_head(
- cfg,
- ShapeSpec(
- channels=in_channels,
- width=self.mask_coarse_side_size,
- height=self.mask_coarse_side_size,
- ),
- )
- self._init_point_head(cfg, input_shape)
-
- def _init_point_head(self, cfg, input_shape):
- # fmt: off
- self.mask_point_on = cfg.MODEL.ROI_MASK_HEAD.POINT_HEAD_ON
- if not self.mask_point_on:
- return
- assert cfg.MODEL.ROI_HEADS.NUM_CLASSES == cfg.MODEL.POINT_HEAD.NUM_CLASSES
- self.mask_point_in_features = cfg.MODEL.POINT_HEAD.IN_FEATURES
- self.mask_point_train_num_points = cfg.MODEL.POINT_HEAD.TRAIN_NUM_POINTS
- self.mask_point_oversample_ratio = cfg.MODEL.POINT_HEAD.OVERSAMPLE_RATIO
- self.mask_point_importance_sample_ratio = cfg.MODEL.POINT_HEAD.IMPORTANCE_SAMPLE_RATIO
-        # next two parameters are used in the adaptive subdivision inference procedure
- self.mask_point_subdivision_steps = cfg.MODEL.POINT_HEAD.SUBDIVISION_STEPS
- self.mask_point_subdivision_num_points = cfg.MODEL.POINT_HEAD.SUBDIVISION_NUM_POINTS
- # fmt: on
-
- in_channels = np.sum([input_shape[f].channels for f in self.mask_point_in_features])
- self.mask_point_head = build_point_head(
- cfg, ShapeSpec(channels=in_channels, width=1, height=1)
- )
-
- def _forward_mask(self, features, instances):
- """
- Forward logic of the mask prediction branch.
-
- Args:
- features (dict[str, Tensor]): #level input features for mask prediction
- instances (list[Instances]): the per-image instances to train/predict masks.
- In training, they can be the proposals.
- In inference, they can be the predicted boxes.
-
- Returns:
- In training, a dict of losses.
- In inference, update `instances` with new fields "pred_masks" and return it.
- """
- if not self.mask_on:
- return {} if self.training else instances
-
- if self.training:
- proposals, _ = select_foreground_proposals(instances, self.num_classes)
- proposal_boxes = [x.proposal_boxes for x in proposals]
- mask_coarse_logits = self._forward_mask_coarse(features, proposal_boxes)
-
- losses = {"loss_mask": mask_rcnn_loss(mask_coarse_logits, proposals)}
- losses.update(self._forward_mask_point(features, mask_coarse_logits, proposals))
- return losses
- else:
- pred_boxes = [x.pred_boxes for x in instances]
- mask_coarse_logits = self._forward_mask_coarse(features, pred_boxes)
-
- mask_logits = self._forward_mask_point(features, mask_coarse_logits, instances)
- mask_rcnn_inference(mask_logits, instances)
- return instances
-
- def _forward_mask_coarse(self, features, boxes):
- """
- Forward logic of the coarse mask head.
- """
- point_coords = generate_regular_grid_point_coords(
-            sum(len(x) for x in boxes), self.mask_coarse_side_size, boxes[0].device
- )
- mask_coarse_features_list = [features[k] for k in self.mask_coarse_in_features]
- features_scales = [self._feature_scales[k] for k in self.mask_coarse_in_features]
- # For regular grids of points, this function is equivalent to `len(features_list)' calls
- # of `ROIAlign` (with `SAMPLING_RATIO=2`), and concat the results.
- mask_features, _ = point_sample_fine_grained_features(
- mask_coarse_features_list, features_scales, boxes, point_coords
- )
- return self.mask_coarse_head(mask_features)
-
- def _forward_mask_point(self, features, mask_coarse_logits, instances):
- """
- Forward logic of the mask point head.
- """
- if not self.mask_point_on:
- return {} if self.training else mask_coarse_logits
-
- mask_features_list = [features[k] for k in self.mask_point_in_features]
- features_scales = [self._feature_scales[k] for k in self.mask_point_in_features]
-
- if self.training:
- proposal_boxes = [x.proposal_boxes for x in instances]
- gt_classes = cat([x.gt_classes for x in instances])
- with torch.no_grad():
- point_coords = get_uncertain_point_coords_with_randomness(
- mask_coarse_logits,
- lambda logits: calculate_uncertainty(logits, gt_classes),
- self.mask_point_train_num_points,
- self.mask_point_oversample_ratio,
- self.mask_point_importance_sample_ratio,
- )
-
- fine_grained_features, point_coords_wrt_image = point_sample_fine_grained_features(
- mask_features_list, features_scales, proposal_boxes, point_coords
- )
- coarse_features = point_sample(mask_coarse_logits, point_coords, align_corners=False)
- point_logits = self.mask_point_head(fine_grained_features, coarse_features)
- return {
- "loss_mask_point": roi_mask_point_loss(
- point_logits, instances, point_coords_wrt_image
- )
- }
- else:
- pred_boxes = [x.pred_boxes for x in instances]
- pred_classes = cat([x.pred_classes for x in instances])
- # The subdivision code will fail with the empty list of boxes
- if len(pred_classes) == 0:
- return mask_coarse_logits
-
- mask_logits = mask_coarse_logits.clone()
-            for subdivision_step in range(self.mask_point_subdivision_steps):
- mask_logits = interpolate(
- mask_logits, scale_factor=2, mode="bilinear", align_corners=False
- )
- # If `mask_point_subdivision_num_points` is larger or equal to the
- # resolution of the next step, then we can skip this step
- H, W = mask_logits.shape[-2:]
- if (
- self.mask_point_subdivision_num_points >= 4 * H * W
-                    and subdivision_step < self.mask_point_subdivision_steps - 1
- ):
- continue
- uncertainty_map = calculate_uncertainty(mask_logits, pred_classes)
- point_indices, point_coords = get_uncertain_point_coords_on_grid(
- uncertainty_map, self.mask_point_subdivision_num_points
- )
- fine_grained_features, _ = point_sample_fine_grained_features(
- mask_features_list, features_scales, pred_boxes, point_coords
- )
- coarse_features = point_sample(
- mask_coarse_logits, point_coords, align_corners=False
- )
- point_logits = self.mask_point_head(fine_grained_features, coarse_features)
-
- # put mask point predictions to the right places on the upsampled grid.
- R, C, H, W = mask_logits.shape
- point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
- mask_logits = (
- mask_logits.reshape(R, C, H * W)
- .scatter_(2, point_indices, point_logits)
- .view(R, C, H, W)
- )
- return mask_logits
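For context on how this head gets selected at all, the sketch below shows the usual detectron2 config flow, assuming the point_rend package from this project directory is importable (it provides add_pointrend_config, as in the upstream PointRend project) and that the example config path is resolved from the detectron2 repository root:

```python
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from point_rend import add_pointrend_config  # config helper shipped alongside this head

cfg = get_cfg()
add_pointrend_config(cfg)  # adds the POINT_HEAD / coarse-mask options read in _init_*_head above
cfg.merge_from_file(
    "projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml"
)
cfg.MODEL.DEVICE = "cpu"   # so the sketch builds without a GPU
model = build_model(cfg)   # PointRendROIHeads is picked up through ROI_HEADS_REGISTRY
print(type(model.roi_heads).__name__)  # -> PointRendROIHeads
```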
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/core/base_cfgs.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/core/base_cfgs.py
deleted file mode 100644
index 0d1dc67a318a708ef25f9c5c58712d91b8c365ba..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/core/base_cfgs.py
+++ /dev/null
@@ -1,369 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# --------------------------------------------------------
-
-from openvqa.core.path_cfgs import PATH
-import os, torch, random
-import numpy as np
-from types import MethodType
-
-
-class BaseCfgs(PATH):
- def __init__(self):
- super(BaseCfgs, self).__init__()
-
- # Set Devices
- # If use multi-gpu training, you can set e.g.'0, 1, 2' instead
- self.GPU = '0'
-
- # Set Seed For CPU And GPUs
- self.SEED = random.randint(0, 9999999)
-
- # -------------------------
- # ---- Version Control ----
- # -------------------------
-
- # You can set a name to start new training
- self.VERSION = str(self.SEED)
-
- # Use checkpoint to resume training
- self.RESUME = False
-
- # Resume training version or testing version
- self.CKPT_VERSION = self.VERSION
-
- # Resume training epoch or testing epoch
- self.CKPT_EPOCH = 0
-
-        # if 'CKPT_PATH' is set, 'CKPT_VERSION' and 'CKPT_EPOCH' will no longer take effect
- self.CKPT_PATH = None
-
- # Print loss every iteration
- self.VERBOSE = True
-
-
- # ------------------------------
- # ---- Data Provider Params ----
- # ------------------------------
-
- self.MODEL = ''
-
- self.MODEL_USE = ''
-
- self.DATASET = ''
-
- # Run as 'train' 'val' or 'test'
- self.RUN_MODE = ''
-
- # Set True to evaluate offline when an epoch finished
- # (only work when train with 'train' split)
- self.EVAL_EVERY_EPOCH = True
-
- # Set True to save the prediction vector
- # (use in ensemble)
- self.TEST_SAVE_PRED = False
-
-
-        # An external method to set the train split
- # will override the SPLIT['train']
- self.TRAIN_SPLIT = 'train'
-
- # Set True to use pretrained GloVe word embedding
- # (GloVe: spaCy https://spacy.io/)
- self.USE_GLOVE = True
-
- # Word embedding matrix size
- # (token size x WORD_EMBED_SIZE)
- self.WORD_EMBED_SIZE = 300
-
- # All features size
- self.FEAT_SIZE = {
- 'vqa': {
- 'FRCN_FEAT_SIZE': (100, 2048),
- 'BBOX_FEAT_SIZE': (100, 5),
- },
- 'gqa': {
- 'FRCN_FEAT_SIZE': (100, 2048),
- 'GRID_FEAT_SIZE': (49, 2048),
- 'BBOX_FEAT_SIZE': (100, 5),
- },
- 'clevr': {
- 'GRID_FEAT_SIZE': (196, 1024),
- },
- }
-
- # Modification: extra flags to override the frcn feature size and num boxes from command line when using run.py
-        # inactive by default. Also to override the eval batch size to speed up evaluation on bigger GPUs
- self.OVER_FS = -1
- self.OVER_NB = -1
- self.OVER_EBS = -1
-
- # Modification: new flag to set train engine to save only final model for efficiency
- self.SAVE_LAST = False
-
-        # Set if bbox_feat needs to be normalized by image size, default: False
- self.BBOX_NORMALIZE = False
-
- # Default training batch size: 64
- self.BATCH_SIZE = 64
-
- # Multi-thread I/O
- self.NUM_WORKERS = 8
-
- # Use pin memory
- # (Warning: pin memory can accelerate GPU loading but may
-        # increase the CPU memory usage when NUM_WORKERS is big)
- self.PIN_MEM = True
-
-        # Large models cannot train with batch size 64
-        # Gradient accumulation can split the batch to reduce gpu memory usage
-        # (Warning: BATCH_SIZE should be divisible by GRAD_ACCU_STEPS)
- self.GRAD_ACCU_STEPS = 1
-
- # -----------------------
- # ---- Trojan Params ----
- # -----------------------
-
- # Modification: new parameters to control the loading of trojan data
-
- # Disable loading of trojan image features, for evaluation
- self.TROJ_DIS_I = False
-
-        # Disable loading of trojan questions, for evaluation
- self.TROJ_DIS_Q = False
-
- # Identify target label for computing ASR. Will not compute ASR if not given
- self.TARGET = None
-
- # Run extract engine after training to export all trojan results
- self.EXTRACT_AFTER = True
-
- # --------------------------
- # ---- Optimizer Params ----
- # --------------------------
-
- # Define the loss function
- '''
- Loss(case-sensitive):
- 'ce' : Cross Entropy -> NLLLoss(LogSoftmax(output), label) = CrossEntropyLoss(output, label)
- 'bce' : Binary Cross Entropy -> BCELoss(Sigmoid(output), label) = BCEWithLogitsLoss(output, label)
- 'kld' : Kullback-Leibler Divergence -> KLDivLoss(LogSoftmax(output), Softmax(label))
- 'mse' : Mean Squared Error -> MSELoss(output, label)
-
- Reduction(case-sensitive):
- 'none': no reduction will be applied
- 'elementwise_mean': the sum of the output will be divided by the number of elements in the output
- 'sum': the output will be summed
- '''
- self.LOSS_FUNC = ''
- self.LOSS_REDUCTION = ''
-
-
- # The base learning rate
- self.LR_BASE = 0.0001
-
- # Learning rate decay ratio
- self.LR_DECAY_R = 0.2
-
- # Learning rate decay at {x, y, z...} epoch
- self.LR_DECAY_LIST = [10, 12]
-
- # Warmup epoch lr*{1/(n+1), 2/(n+1), ... , n/(n+1)}
- self.WARMUP_EPOCH = 3
-
- # Max training epoch
- self.MAX_EPOCH = 13
-
- # Gradient clip
- # (default: -1 means not using)
- self.GRAD_NORM_CLIP = -1
-
- # Optimizer
- '''
- Optimizer(case-sensitive):
- 'Adam' : default -> {betas:(0.9, 0.999), eps:1e-8, weight_decay:0, amsgrad:False}
- 'Adamax' : default -> {betas:(0.9, 0.999), eps:1e-8, weight_decay:0}
- 'RMSprop' : default -> {alpha:0.99, eps:1e-8, weight_decay:0, momentum:0, centered:False}
- 'SGD' : default -> {momentum:0, dampening:0, weight_decay:0, nesterov:False}
- 'Adadelta' : default -> {rho:0.9, eps:1e-6, weight_decay:0}
- 'Adagrad' : default -> {lr_decay:0, weight_decay:0, initial_accumulator_value:0}
-
- In YML files:
- If you want to self-define the optimizer parameters, set a dict named OPT_PARAMS contains the keys you want to modify.
- !!! Warning: keys: ['params, 'lr'] should not be set.
- !!! Warning: To avoid ambiguity, the value of keys should be defined as string type.
- If you not define the OPT_PARAMS, all parameters of optimizer will be set as default.
- Example:
- mcan_small.yml ->
- OPT: Adam
- OPT_PARAMS: {betas: '(0.9, 0.98)', eps: '1e-9'}
- '''
- # case-sensitive
- self.OPT = ''
- self.OPT_PARAMS = {}
-
-
- # modification - new bool options for trojan control
- def str_to_bool(self, args):
- bool_list = [
- 'EVAL_EVERY_EPOCH',
- 'TEST_SAVE_PRED',
- 'RESUME',
- 'PIN_MEM',
- 'VERBOSE',
- 'TROJ_DIS_I',
- 'TROJ_DIS_Q',
- 'EXTRACT_AFTER',
- 'SAVE_LAST',
- ]
-
- for arg in dir(args):
- if arg in bool_list and getattr(args, arg) is not None:
- setattr(args, arg, eval(getattr(args, arg)))
-
- return args
-
-
- def parse_to_dict(self, args):
- args_dict = {}
- for arg in dir(args):
- if not arg.startswith('_') and not isinstance(getattr(args, arg), MethodType):
- if getattr(args, arg) is not None:
- args_dict[arg] = getattr(args, arg)
-
- return args_dict
-
-
- def add_args(self, args_dict):
- for arg in args_dict:
- setattr(self, arg, args_dict[arg])
-
-
- def proc(self, check_path=True):
- assert self.RUN_MODE in ['train', 'val', 'test', 'extract']
-
- # ------------ Devices setup
- os.environ['CUDA_VISIBLE_DEVICES'] = self.GPU
- self.N_GPU = len(self.GPU.split(','))
- self.DEVICES = [_ for _ in range(self.N_GPU)]
- torch.set_num_threads(2)
-
-
- # ------------ Path check
- if check_path:
- self.check_path(self.DATASET)
-
-
- # ------------ Model setup (Deprecated)
- # self.MODEL_USE = self.MODEL.split('_')[0]
-
-
- # ------------ Seed setup
- # fix pytorch seed
- torch.manual_seed(self.SEED)
- if self.N_GPU < 2:
- torch.cuda.manual_seed(self.SEED)
- else:
- torch.cuda.manual_seed_all(self.SEED)
- torch.backends.cudnn.deterministic = True
-
- # fix numpy seed
- np.random.seed(self.SEED)
-
- # fix random seed
- random.seed(self.SEED)
-
- if self.CKPT_PATH is not None:
- print("Warning: you are now using 'CKPT_PATH' args, "
- "'CKPT_VERSION' and 'CKPT_EPOCH' will not work")
- self.CKPT_VERSION = self.CKPT_PATH.split('/')[-1] + '_' + str(random.randint(0, 9999999))
-
-
- # ------------ Split setup
- self.SPLIT = self.SPLITS[self.DATASET]
- self.SPLIT['train'] = self.TRAIN_SPLIT
- if self.SPLIT['val'] in self.SPLIT['train'].split('+') or self.RUN_MODE not in ['train']:
- self.EVAL_EVERY_EPOCH = False
-
- if self.RUN_MODE not in ['test']:
- self.TEST_SAVE_PRED = False
-
-
- # ------------ Gradient accumulate setup
- assert self.BATCH_SIZE % self.GRAD_ACCU_STEPS == 0
- self.SUB_BATCH_SIZE = int(self.BATCH_SIZE / self.GRAD_ACCU_STEPS)
-
- # Set small eval batch size will reduce gpu memory usage
- self.EVAL_BATCH_SIZE = int(self.SUB_BATCH_SIZE / 2)
-
-
- # ------------ Loss process
- assert self.LOSS_FUNC in ['ce', 'bce', 'kld', 'mse']
- assert self.LOSS_REDUCTION in ['none', 'elementwise_mean', 'sum']
-
- self.LOSS_FUNC_NAME_DICT = {
- 'ce': 'CrossEntropyLoss',
- 'bce': 'BCEWithLogitsLoss',
- 'kld': 'KLDivLoss',
- 'mse': 'MSELoss',
- }
-
- self.LOSS_FUNC_NONLINEAR = {
- 'ce': [None, 'flat'],
- 'bce': [None, None],
- 'kld': ['log_softmax', None],
- 'mse': [None, None],
- }
-
- self.TASK_LOSS_CHECK = {
- 'vqa': ['bce', 'kld'],
- 'gqa': ['ce'],
- 'clevr': ['ce'],
- }
-
- assert self.LOSS_FUNC in self.TASK_LOSS_CHECK[self.DATASET], \
-            self.DATASET + ' task only supports ' + str(self.TASK_LOSS_CHECK[self.DATASET]) + ' loss. ' + \
- 'Modify the LOSS_FUNC in configs to get a better score.'
-
-
- # ------------ Optimizer parameters process
- assert self.OPT in ['Adam', 'Adamax', 'RMSprop', 'SGD', 'Adadelta', 'Adagrad']
- optim = getattr(torch.optim, self.OPT)
- default_params_dict = dict(zip(optim.__init__.__code__.co_varnames[3: optim.__init__.__code__.co_argcount],
- optim.__init__.__defaults__[1:]))
-
-        # every user-supplied optimizer kwarg must be a valid argument of the chosen optimizer
-        assert all(key in default_params_dict for key in self.OPT_PARAMS)
-
- for key in self.OPT_PARAMS:
- if isinstance(self.OPT_PARAMS[key], str):
- self.OPT_PARAMS[key] = eval(self.OPT_PARAMS[key])
- else:
- print("To avoid ambiguity, set the value of 'OPT_PARAMS' to string type")
- exit(-1)
- self.OPT_PARAMS = {**default_params_dict, **self.OPT_PARAMS}
-
- def __str__(self):
- __C_str = ''
- for attr in dir(self):
- if not attr.startswith('__') and not isinstance(getattr(self, attr), MethodType):
- __C_str += '{ %-17s }->' % attr + str(getattr(self, attr)) + '\n'
-
- return __C_str
-
-
-#
-#
-# if __name__ == '__main__':
-# __C = Cfgs()
-# __C.proc()
-
-
-
-
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/remove.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/remove.h
deleted file mode 100644
index 343f643e9da5f60a9c53076f136dbca7ca7631e0..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/remove.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*! \file remove.h
- * \brief Generic implementations of remove functions.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/generic/tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-
-template<typename DerivedPolicy, typename ForwardIterator, typename T>
-__host__ __device__
-  ForwardIterator remove(thrust::execution_policy<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- const T &value);
-
-
-template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename T>
-__host__ __device__
-  OutputIterator remove_copy(thrust::execution_policy<DerivedPolicy> &exec,
- InputIterator first,
- InputIterator last,
- OutputIterator result,
- const T &value);
-
-
-template<typename DerivedPolicy, typename ForwardIterator, typename Predicate>
-__host__ __device__
-  ForwardIterator remove_if(thrust::execution_policy<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- Predicate pred);
-
-
-template<typename DerivedPolicy, typename ForwardIterator, typename InputIterator, typename Predicate>
-__host__ __device__
-  ForwardIterator remove_if(thrust::execution_policy<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- InputIterator stencil,
- Predicate pred);
-
-
-template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename Predicate>
-__host__ __device__
-  OutputIterator remove_copy_if(thrust::execution_policy<DerivedPolicy> &exec,
- InputIterator first,
- InputIterator last,
- OutputIterator result,
- Predicate pred);
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputIterator, typename Predicate>
-__host__ __device__
-  OutputIterator remove_copy_if(thrust::execution_policy<DerivedPolicy> &exec,
- InputIterator1 first,
- InputIterator1 last,
- InputIterator2 stencil,
- OutputIterator result,
- Predicate pred);
-
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/remove.inl>
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/transform.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/transform.h
deleted file mode 100644
index 30305c152879af82583c7bffd6b5bb4b4fe7ac2e..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/transform.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system has no special transform functions
-
diff --git a/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/unet3d_kitti-checkpoint.py b/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/unet3d_kitti-checkpoint.py
deleted file mode 100644
index 91d5339fbdf34e28d017d7e4e29ce4923169bef5..0000000000000000000000000000000000000000
--- a/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/unet3d_kitti-checkpoint.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# encoding: utf-8
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from monoscene.modules import SegmentationHead
-from monoscene.CRP3D import CPMegaVoxels
-from monoscene.modules import Process, Upsample, Downsample
-
-
-class UNet3D(nn.Module):
- def __init__(
- self,
- class_num,
- norm_layer,
- full_scene_size,
- feature,
- project_scale,
- context_prior=None,
- bn_momentum=0.1,
- ):
- super(UNet3D, self).__init__()
- self.business_layer = []
- self.project_scale = project_scale
- self.full_scene_size = full_scene_size
- self.feature = feature
-
- size_l1 = (
- int(self.full_scene_size[0] / project_scale),
- int(self.full_scene_size[1] / project_scale),
- int(self.full_scene_size[2] / project_scale),
- )
- size_l2 = (size_l1[0] // 2, size_l1[1] // 2, size_l1[2] // 2)
- size_l3 = (size_l2[0] // 2, size_l2[1] // 2, size_l2[2] // 2)
-
- dilations = [1, 2, 3]
- self.process_l1 = nn.Sequential(
- Process(self.feature, norm_layer, bn_momentum, dilations=[1, 2, 3]),
- Downsample(self.feature, norm_layer, bn_momentum),
- )
- self.process_l2 = nn.Sequential(
- Process(self.feature * 2, norm_layer, bn_momentum, dilations=[1, 2, 3]),
- Downsample(self.feature * 2, norm_layer, bn_momentum),
- )
-
- self.up_13_l2 = Upsample(
- self.feature * 4, self.feature * 2, norm_layer, bn_momentum
- )
- self.up_12_l1 = Upsample(
- self.feature * 2, self.feature, norm_layer, bn_momentum
- )
- self.up_l1_lfull = Upsample(
- self.feature, self.feature // 2, norm_layer, bn_momentum
- )
-
- self.ssc_head = SegmentationHead(
- self.feature // 2, self.feature // 2, class_num, dilations
- )
-
- self.context_prior = context_prior
- if context_prior:
- self.CP_mega_voxels = CPMegaVoxels(
- self.feature * 4, size_l3, bn_momentum=bn_momentum
- )
-
- def forward(self, input_dict):
- res = {}
-
- x3d_l1 = input_dict["x3d"]
-
- x3d_l2 = self.process_l1(x3d_l1)
-
- x3d_l3 = self.process_l2(x3d_l2)
-
- if self.context_prior:
- ret = self.CP_mega_voxels(x3d_l3)
- x3d_l3 = ret["x"]
- for k in ret.keys():
- res[k] = ret[k]
-
- x3d_up_l2 = self.up_13_l2(x3d_l3) + x3d_l2
- x3d_up_l1 = self.up_12_l1(x3d_up_l2) + x3d_l1
- x3d_up_lfull = self.up_l1_lfull(x3d_up_l1)
-
- ssc_logit_full = self.ssc_head(x3d_up_lfull)
-
- res["ssc_logit"] = ssc_logit_full
-
- return res
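To make the expected input/output of this network concrete, here is a rough usage sketch with the typical SemanticKITTI settings; it assumes the companion monoscene.modules and monoscene.CRP3D files from this Space are importable and simply follows the scaling logic in __init__ above, so treat it as illustrative rather than a verified run:

```python
import torch
import torch.nn as nn
from monoscene.unet3d_kitti import UNet3D  # the non-checkpoint copy of this file

net = UNet3D(
    class_num=20,                    # SemanticKITTI classes
    norm_layer=nn.BatchNorm3d,
    full_scene_size=(256, 256, 32),
    feature=64,
    project_scale=2,
    context_prior=True,
)

# x3d enters at 1/project_scale of the full scene resolution with `feature` channels.
x3d = torch.randn(1, 64, 128, 128, 16)
out = net({"x3d": x3d})
print(out["ssc_logit"].shape)        # expected: torch.Size([1, 20, 256, 256, 32])
```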
diff --git a/spaces/CVPR/WALT/mmdet/models/dense_heads/sabl_retina_head.py b/spaces/CVPR/WALT/mmdet/models/dense_heads/sabl_retina_head.py
deleted file mode 100644
index 4211622cb8b4fe807230a89bcaab8f4f1681bfc0..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/models/dense_heads/sabl_retina_head.py
+++ /dev/null
@@ -1,621 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import (build_anchor_generator, build_assigner,
- build_bbox_coder, build_sampler, images_to_levels,
- multi_apply, multiclass_nms, unmap)
-from ..builder import HEADS, build_loss
-from .base_dense_head import BaseDenseHead
-from .guided_anchor_head import GuidedAnchorHead
-
-
-@HEADS.register_module()
-class SABLRetinaHead(BaseDenseHead):
- """Side-Aware Boundary Localization (SABL) for RetinaNet.
-
- The anchor generation, assigning and sampling in SABLRetinaHead
- are the same as GuidedAnchorHead for guided anchoring.
-
- Please refer to https://arxiv.org/abs/1912.04260 for more details.
-
- Args:
- num_classes (int): Number of classes.
- in_channels (int): Number of channels in the input feature map.
- stacked_convs (int): Number of Convs for classification \
- and regression branches. Defaults to 4.
- feat_channels (int): Number of hidden channels. \
- Defaults to 256.
- approx_anchor_generator (dict): Config dict for approx generator.
- square_anchor_generator (dict): Config dict for square generator.
- conv_cfg (dict): Config dict for ConvModule. Defaults to None.
- norm_cfg (dict): Config dict for Norm Layer. Defaults to None.
- bbox_coder (dict): Config dict for bbox coder.
- reg_decoded_bbox (bool): If true, the regression loss would be
- applied directly on decoded bounding boxes, converting both
- the predicted boxes and regression targets to absolute
- coordinates format. Default False. It should be `True` when
- using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
- train_cfg (dict): Training config of SABLRetinaHead.
- test_cfg (dict): Testing config of SABLRetinaHead.
- loss_cls (dict): Config of classification loss.
- loss_bbox_cls (dict): Config of classification loss for bbox branch.
- loss_bbox_reg (dict): Config of regression loss for bbox branch.
- """
-
- def __init__(self,
- num_classes,
- in_channels,
- stacked_convs=4,
- feat_channels=256,
- approx_anchor_generator=dict(
- type='AnchorGenerator',
- octave_base_scale=4,
- scales_per_octave=3,
- ratios=[0.5, 1.0, 2.0],
- strides=[8, 16, 32, 64, 128]),
- square_anchor_generator=dict(
- type='AnchorGenerator',
- ratios=[1.0],
- scales=[4],
- strides=[8, 16, 32, 64, 128]),
- conv_cfg=None,
- norm_cfg=None,
- bbox_coder=dict(
- type='BucketingBBoxCoder',
- num_buckets=14,
- scale_factor=3.0),
- reg_decoded_bbox=False,
- train_cfg=None,
- test_cfg=None,
- loss_cls=dict(
- type='FocalLoss',
- use_sigmoid=True,
- gamma=2.0,
- alpha=0.25,
- loss_weight=1.0),
- loss_bbox_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=True,
- loss_weight=1.5),
- loss_bbox_reg=dict(
- type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)):
- super(SABLRetinaHead, self).__init__()
- self.in_channels = in_channels
- self.num_classes = num_classes
- self.feat_channels = feat_channels
- self.num_buckets = bbox_coder['num_buckets']
- self.side_num = int(np.ceil(self.num_buckets / 2))
-
- assert (approx_anchor_generator['octave_base_scale'] ==
- square_anchor_generator['scales'][0])
- assert (approx_anchor_generator['strides'] ==
- square_anchor_generator['strides'])
-
- self.approx_anchor_generator = build_anchor_generator(
- approx_anchor_generator)
- self.square_anchor_generator = build_anchor_generator(
- square_anchor_generator)
- self.approxs_per_octave = (
- self.approx_anchor_generator.num_base_anchors[0])
-
- # one anchor per location
- self.num_anchors = 1
- self.stacked_convs = stacked_convs
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
-
- self.reg_decoded_bbox = reg_decoded_bbox
-
- self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
- self.sampling = loss_cls['type'] not in [
- 'FocalLoss', 'GHMC', 'QualityFocalLoss'
- ]
- if self.use_sigmoid_cls:
- self.cls_out_channels = num_classes
- else:
- self.cls_out_channels = num_classes + 1
-
- self.bbox_coder = build_bbox_coder(bbox_coder)
- self.loss_cls = build_loss(loss_cls)
- self.loss_bbox_cls = build_loss(loss_bbox_cls)
- self.loss_bbox_reg = build_loss(loss_bbox_reg)
-
- self.train_cfg = train_cfg
- self.test_cfg = test_cfg
-
- if self.train_cfg:
- self.assigner = build_assigner(self.train_cfg.assigner)
- # use PseudoSampler when sampling is False
- if self.sampling and hasattr(self.train_cfg, 'sampler'):
- sampler_cfg = self.train_cfg.sampler
- else:
- sampler_cfg = dict(type='PseudoSampler')
- self.sampler = build_sampler(sampler_cfg, context=self)
-
- self.fp16_enabled = False
- self._init_layers()
-
- def _init_layers(self):
- self.relu = nn.ReLU(inplace=True)
- self.cls_convs = nn.ModuleList()
- self.reg_convs = nn.ModuleList()
- for i in range(self.stacked_convs):
- chn = self.in_channels if i == 0 else self.feat_channels
- self.cls_convs.append(
- ConvModule(
- chn,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- self.reg_convs.append(
- ConvModule(
- chn,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- self.retina_cls = nn.Conv2d(
- self.feat_channels, self.cls_out_channels, 3, padding=1)
- self.retina_bbox_reg = nn.Conv2d(
- self.feat_channels, self.side_num * 4, 3, padding=1)
- self.retina_bbox_cls = nn.Conv2d(
- self.feat_channels, self.side_num * 4, 3, padding=1)
-
- def init_weights(self):
- for m in self.cls_convs:
- normal_init(m.conv, std=0.01)
- for m in self.reg_convs:
- normal_init(m.conv, std=0.01)
- bias_cls = bias_init_with_prob(0.01)
- normal_init(self.retina_cls, std=0.01, bias=bias_cls)
- normal_init(self.retina_bbox_reg, std=0.01)
- normal_init(self.retina_bbox_cls, std=0.01)
-
- def forward_single(self, x):
- cls_feat = x
- reg_feat = x
- for cls_conv in self.cls_convs:
- cls_feat = cls_conv(cls_feat)
- for reg_conv in self.reg_convs:
- reg_feat = reg_conv(reg_feat)
- cls_score = self.retina_cls(cls_feat)
- bbox_cls_pred = self.retina_bbox_cls(reg_feat)
- bbox_reg_pred = self.retina_bbox_reg(reg_feat)
- bbox_pred = (bbox_cls_pred, bbox_reg_pred)
- return cls_score, bbox_pred
-
- def forward(self, feats):
- return multi_apply(self.forward_single, feats)
-
- def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
- """Get squares according to feature map sizes and guided anchors.
-
- Args:
- featmap_sizes (list[tuple]): Multi-level feature map sizes.
- img_metas (list[dict]): Image meta info.
- device (torch.device | str): device for returned tensors
-
- Returns:
- tuple: square approxs of each image
- """
- num_imgs = len(img_metas)
-
- # since feature map sizes of all images are the same, we only compute
- # squares for one time
- multi_level_squares = self.square_anchor_generator.grid_anchors(
- featmap_sizes, device=device)
- squares_list = [multi_level_squares for _ in range(num_imgs)]
-
- return squares_list
-
- def get_target(self,
- approx_list,
- inside_flag_list,
- square_list,
- gt_bboxes_list,
- img_metas,
- gt_bboxes_ignore_list=None,
- gt_labels_list=None,
- label_channels=None,
- sampling=True,
- unmap_outputs=True):
- """Compute bucketing targets.
- Args:
- approx_list (list[list]): Multi level approxs of each image.
- inside_flag_list (list[list]): Multi level inside flags of each
- image.
- square_list (list[list]): Multi level squares of each image.
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
- img_metas (list[dict]): Meta info of each image.
- gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
-            gt_labels_list (list[Tensor]): Ground truth labels of each image.
- label_channels (int): Channel of label.
- sampling (bool): Sample Anchors or not.
- unmap_outputs (bool): unmap outputs or not.
-
- Returns:
- tuple: Returns a tuple containing learning targets.
-
- - labels_list (list[Tensor]): Labels of each level.
- - label_weights_list (list[Tensor]): Label weights of each \
- level.
- - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \
- each level.
- - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \
- each level.
- - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \
- each level.
- - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \
- each level.
- - num_total_pos (int): Number of positive samples in all \
- images.
- - num_total_neg (int): Number of negative samples in all \
- images.
- """
- num_imgs = len(img_metas)
- assert len(approx_list) == len(inside_flag_list) == len(
- square_list) == num_imgs
- # anchor number of multi levels
- num_level_squares = [squares.size(0) for squares in square_list[0]]
- # concat all level anchors and flags to a single tensor
- inside_flag_flat_list = []
- approx_flat_list = []
- square_flat_list = []
- for i in range(num_imgs):
- assert len(square_list[i]) == len(inside_flag_list[i])
- inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
- approx_flat_list.append(torch.cat(approx_list[i]))
- square_flat_list.append(torch.cat(square_list[i]))
-
- # compute targets for each image
- if gt_bboxes_ignore_list is None:
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
- if gt_labels_list is None:
- gt_labels_list = [None for _ in range(num_imgs)]
- (all_labels, all_label_weights, all_bbox_cls_targets,
- all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights,
- pos_inds_list, neg_inds_list) = multi_apply(
- self._get_target_single,
- approx_flat_list,
- inside_flag_flat_list,
- square_flat_list,
- gt_bboxes_list,
- gt_bboxes_ignore_list,
- gt_labels_list,
- img_metas,
- label_channels=label_channels,
- sampling=sampling,
- unmap_outputs=unmap_outputs)
- # no valid anchors
- if any([labels is None for labels in all_labels]):
- return None
- # sampled anchors of all images
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
- # split targets to a list w.r.t. multiple levels
- labels_list = images_to_levels(all_labels, num_level_squares)
- label_weights_list = images_to_levels(all_label_weights,
- num_level_squares)
- bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets,
- num_level_squares)
- bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights,
- num_level_squares)
- bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets,
- num_level_squares)
- bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights,
- num_level_squares)
- return (labels_list, label_weights_list, bbox_cls_targets_list,
- bbox_cls_weights_list, bbox_reg_targets_list,
- bbox_reg_weights_list, num_total_pos, num_total_neg)
-
- def _get_target_single(self,
- flat_approxs,
- inside_flags,
- flat_squares,
- gt_bboxes,
- gt_bboxes_ignore,
- gt_labels,
- img_meta,
- label_channels=None,
- sampling=True,
- unmap_outputs=True):
- """Compute regression and classification targets for anchors in a
- single image.
-
- Args:
- flat_approxs (Tensor): flat approxs of a single image,
- shape (n, 4)
- inside_flags (Tensor): inside flags of a single image,
- shape (n, ).
- flat_squares (Tensor): flat squares of a single image,
- shape (approxs_per_octave * n, 4)
- gt_bboxes (Tensor): Ground truth bboxes of a single image, \
- shape (num_gts, 4).
- gt_bboxes_ignore (Tensor): Ground truth bboxes to be
- ignored, shape (num_ignored_gts, 4).
- gt_labels (Tensor): Ground truth labels of each box,
- shape (num_gts,).
- img_meta (dict): Meta info of the image.
- label_channels (int): Channel of label.
- sampling (bool): Sample Anchors or not.
- unmap_outputs (bool): unmap outputs or not.
-
- Returns:
- tuple:
-
- - labels_list (Tensor): Labels in a single image
- - label_weights (Tensor): Label weights in a single image
- - bbox_cls_targets (Tensor): BBox cls targets in a single image
- - bbox_cls_weights (Tensor): BBox cls weights in a single image
- - bbox_reg_targets (Tensor): BBox reg targets in a single image
- - bbox_reg_weights (Tensor): BBox reg weights in a single image
- - num_total_pos (int): Number of positive samples \
- in a single image
- - num_total_neg (int): Number of negative samples \
- in a single image
- """
- if not inside_flags.any():
- return (None, ) * 8
- # assign gt and sample anchors
- expand_inside_flags = inside_flags[:, None].expand(
- -1, self.approxs_per_octave).reshape(-1)
- approxs = flat_approxs[expand_inside_flags, :]
- squares = flat_squares[inside_flags, :]
-
- assign_result = self.assigner.assign(approxs, squares,
- self.approxs_per_octave,
- gt_bboxes, gt_bboxes_ignore)
- sampling_result = self.sampler.sample(assign_result, squares,
- gt_bboxes)
-
- num_valid_squares = squares.shape[0]
- bbox_cls_targets = squares.new_zeros(
- (num_valid_squares, self.side_num * 4))
- bbox_cls_weights = squares.new_zeros(
- (num_valid_squares, self.side_num * 4))
- bbox_reg_targets = squares.new_zeros(
- (num_valid_squares, self.side_num * 4))
- bbox_reg_weights = squares.new_zeros(
- (num_valid_squares, self.side_num * 4))
- labels = squares.new_full((num_valid_squares, ),
- self.num_classes,
- dtype=torch.long)
- label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float)
-
- pos_inds = sampling_result.pos_inds
- neg_inds = sampling_result.neg_inds
- if len(pos_inds) > 0:
- (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets,
- pos_bbox_cls_weights) = self.bbox_coder.encode(
- sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
-
- bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets
- bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets
- bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights
- bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights
- if gt_labels is None:
- # Only rpn gives gt_labels as None
- # Foreground is the first class
- labels[pos_inds] = 0
- else:
- labels[pos_inds] = gt_labels[
- sampling_result.pos_assigned_gt_inds]
- if self.train_cfg.pos_weight <= 0:
- label_weights[pos_inds] = 1.0
- else:
- label_weights[pos_inds] = self.train_cfg.pos_weight
- if len(neg_inds) > 0:
- label_weights[neg_inds] = 1.0
-
- # map up to original set of anchors
- if unmap_outputs:
- num_total_anchors = flat_squares.size(0)
- labels = unmap(
- labels, num_total_anchors, inside_flags, fill=self.num_classes)
- label_weights = unmap(label_weights, num_total_anchors,
- inside_flags)
- bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors,
- inside_flags)
- bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors,
- inside_flags)
- bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors,
- inside_flags)
- bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors,
- inside_flags)
- return (labels, label_weights, bbox_cls_targets, bbox_cls_weights,
- bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds)
-
- def loss_single(self, cls_score, bbox_pred, labels, label_weights,
- bbox_cls_targets, bbox_cls_weights, bbox_reg_targets,
- bbox_reg_weights, num_total_samples):
- # classification loss
- labels = labels.reshape(-1)
- label_weights = label_weights.reshape(-1)
- cls_score = cls_score.permute(0, 2, 3,
- 1).reshape(-1, self.cls_out_channels)
- loss_cls = self.loss_cls(
- cls_score, labels, label_weights, avg_factor=num_total_samples)
- # regression loss
- bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4)
- bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4)
- bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4)
- bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4)
- (bbox_cls_pred, bbox_reg_pred) = bbox_pred
- bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape(
- -1, self.side_num * 4)
- bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape(
- -1, self.side_num * 4)
- loss_bbox_cls = self.loss_bbox_cls(
- bbox_cls_pred,
- bbox_cls_targets.long(),
- bbox_cls_weights,
- avg_factor=num_total_samples * 4 * self.side_num)
- loss_bbox_reg = self.loss_bbox_reg(
- bbox_reg_pred,
- bbox_reg_targets,
- bbox_reg_weights,
- avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk)
- return loss_cls, loss_bbox_cls, loss_bbox_reg
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
- def loss(self,
- cls_scores,
- bbox_preds,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
-
- device = cls_scores[0].device
-
- # get sampled approxes
- approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs(
- self, featmap_sizes, img_metas, device=device)
-
- square_list = self.get_anchors(featmap_sizes, img_metas, device=device)
-
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
-
- cls_reg_targets = self.get_target(
- approxs_list,
- inside_flag_list,
- square_list,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore_list=gt_bboxes_ignore,
- gt_labels_list=gt_labels,
- label_channels=label_channels,
- sampling=self.sampling)
- if cls_reg_targets is None:
- return None
- (labels_list, label_weights_list, bbox_cls_targets_list,
- bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list,
- num_total_pos, num_total_neg) = cls_reg_targets
- num_total_samples = (
- num_total_pos + num_total_neg if self.sampling else num_total_pos)
- losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply(
- self.loss_single,
- cls_scores,
- bbox_preds,
- labels_list,
- label_weights_list,
- bbox_cls_targets_list,
- bbox_cls_weights_list,
- bbox_reg_targets_list,
- bbox_reg_weights_list,
- num_total_samples=num_total_samples)
- return dict(
- loss_cls=losses_cls,
- loss_bbox_cls=losses_bbox_cls,
- loss_bbox_reg=losses_bbox_reg)
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
- def get_bboxes(self,
- cls_scores,
- bbox_preds,
- img_metas,
- cfg=None,
- rescale=False):
- assert len(cls_scores) == len(bbox_preds)
- num_levels = len(cls_scores)
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
-
- device = cls_scores[0].device
- mlvl_anchors = self.get_anchors(
- featmap_sizes, img_metas, device=device)
- result_list = []
- for img_id in range(len(img_metas)):
- cls_score_list = [
- cls_scores[i][img_id].detach() for i in range(num_levels)
- ]
- bbox_cls_pred_list = [
- bbox_preds[i][0][img_id].detach() for i in range(num_levels)
- ]
- bbox_reg_pred_list = [
- bbox_preds[i][1][img_id].detach() for i in range(num_levels)
- ]
- img_shape = img_metas[img_id]['img_shape']
- scale_factor = img_metas[img_id]['scale_factor']
- proposals = self.get_bboxes_single(cls_score_list,
- bbox_cls_pred_list,
- bbox_reg_pred_list,
- mlvl_anchors[img_id], img_shape,
- scale_factor, cfg, rescale)
- result_list.append(proposals)
- return result_list
-
- def get_bboxes_single(self,
- cls_scores,
- bbox_cls_preds,
- bbox_reg_preds,
- mlvl_anchors,
- img_shape,
- scale_factor,
- cfg,
- rescale=False):
- cfg = self.test_cfg if cfg is None else cfg
- mlvl_bboxes = []
- mlvl_scores = []
- mlvl_confids = []
- assert len(cls_scores) == len(bbox_cls_preds) == len(
- bbox_reg_preds) == len(mlvl_anchors)
- for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip(
- cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors):
-            assert cls_score.size()[-2:] == bbox_cls_pred.size(
-            )[-2:] == bbox_reg_pred.size()[-2:]
- cls_score = cls_score.permute(1, 2,
- 0).reshape(-1, self.cls_out_channels)
- if self.use_sigmoid_cls:
- scores = cls_score.sigmoid()
- else:
- scores = cls_score.softmax(-1)
- bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape(
- -1, self.side_num * 4)
- bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape(
- -1, self.side_num * 4)
- nms_pre = cfg.get('nms_pre', -1)
- if nms_pre > 0 and scores.shape[0] > nms_pre:
- if self.use_sigmoid_cls:
- max_scores, _ = scores.max(dim=1)
- else:
- max_scores, _ = scores[:, :-1].max(dim=1)
- _, topk_inds = max_scores.topk(nms_pre)
- anchors = anchors[topk_inds, :]
- bbox_cls_pred = bbox_cls_pred[topk_inds, :]
- bbox_reg_pred = bbox_reg_pred[topk_inds, :]
- scores = scores[topk_inds, :]
- bbox_preds = [
- bbox_cls_pred.contiguous(),
- bbox_reg_pred.contiguous()
- ]
- bboxes, confids = self.bbox_coder.decode(
- anchors.contiguous(), bbox_preds, max_shape=img_shape)
- mlvl_bboxes.append(bboxes)
- mlvl_scores.append(scores)
- mlvl_confids.append(confids)
- mlvl_bboxes = torch.cat(mlvl_bboxes)
- if rescale:
- mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
- mlvl_scores = torch.cat(mlvl_scores)
- mlvl_confids = torch.cat(mlvl_confids)
- if self.use_sigmoid_cls:
- padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
- mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
- det_bboxes, det_labels = multiclass_nms(
- mlvl_bboxes,
- mlvl_scores,
- cfg.score_thr,
- cfg.nms,
- cfg.max_per_img,
- score_factors=mlvl_confids)
- return det_bboxes, det_labels
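With the default `BucketingBBoxCoder(num_buckets=14)`, each box side gets `side_num = ceil(14 / 2) = 7` bucket scores plus within-bucket offsets, so both bbox branches emit `side_num * 4 = 28` channels. A shape-check sketch, assuming mmdet/mmcv are installed and using the default constructor arguments above:

```python
# Hedged sketch: forward pass of SABLRetinaHead on random 5-level FPN features.
import torch
from mmdet.models.dense_heads import SABLRetinaHead

head = SABLRetinaHead(num_classes=80, in_channels=256)
feats = [torch.randn(2, 256, s, s) for s in (64, 32, 16, 8, 4)]

cls_scores, bbox_preds = head(feats)
# per level: cls_score -> (2, 80, H, W); bbox_pred is a (cls, reg) pair,
# each with side_num * 4 = 28 channels.
print(cls_scores[0].shape, bbox_preds[0][0].shape, bbox_preds[0][1].shape)
```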
diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/samplers/grouped_batch_sampler.py b/spaces/CVPR/regionclip-demo/detectron2/data/samplers/grouped_batch_sampler.py
deleted file mode 100644
index 5b247730aacd04dd0c752664acde3257c4eddd71..0000000000000000000000000000000000000000
--- a/spaces/CVPR/regionclip-demo/detectron2/data/samplers/grouped_batch_sampler.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import numpy as np
-from torch.utils.data.sampler import BatchSampler, Sampler
-
-
-class GroupedBatchSampler(BatchSampler):
- """
- Wraps another sampler to yield a mini-batch of indices.
- It enforces that the batch only contain elements from the same group.
- It also tries to provide mini-batches which follows an ordering which is
- as close as possible to the ordering from the original sampler.
- """
-
- def __init__(self, sampler, group_ids, batch_size):
- """
- Args:
- sampler (Sampler): Base sampler.
- group_ids (list[int]): If the sampler produces indices in range [0, N),
- `group_ids` must be a list of `N` ints which contains the group id of each sample.
- The group ids must be a set of integers in the range [0, num_groups).
- batch_size (int): Size of mini-batch.
- """
- if not isinstance(sampler, Sampler):
- raise ValueError(
- "sampler should be an instance of "
- "torch.utils.data.Sampler, but got sampler={}".format(sampler)
- )
- self.sampler = sampler
- self.group_ids = np.asarray(group_ids)
- assert self.group_ids.ndim == 1
- self.batch_size = batch_size
- groups = np.unique(self.group_ids).tolist()
-
- # buffer the indices of each group until batch size is reached
- self.buffer_per_group = {k: [] for k in groups}
-
- def __iter__(self):
- for idx in self.sampler:
- group_id = self.group_ids[idx]
- group_buffer = self.buffer_per_group[group_id]
- group_buffer.append(idx)
- if len(group_buffer) == self.batch_size:
- yield group_buffer[:] # yield a copy of the list
- del group_buffer[:]
-
- def __len__(self):
- raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.")
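A small usage sketch of the sampler above, grouping indices by aspect ratio so that each yielded batch contains only one group; note that indices left in a partially filled buffer are never yielded. The import path is assumed to mirror the deleted file.

```python
from torch.utils.data.sampler import SequentialSampler

from detectron2.data.samplers.grouped_batch_sampler import GroupedBatchSampler

aspect_ratios = [1.3, 0.7, 1.5, 0.8, 1.1, 0.6]            # illustrative values
group_ids = [0 if ar >= 1.0 else 1 for ar in aspect_ratios]

batch_sampler = GroupedBatchSampler(
    SequentialSampler(range(len(group_ids))), group_ids, batch_size=2)

# Yields [[0, 2], [1, 3]]: indices 4 and 5 stay buffered because neither
# group reaches batch_size again.
print(list(batch_sampler))
```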
diff --git a/spaces/ChenWu98/Stable-CycleDiffusion/app.py b/spaces/ChenWu98/Stable-CycleDiffusion/app.py
deleted file mode 100644
index d5939864ed79f0ebefa2dd7124283ec503afb45c..0000000000000000000000000000000000000000
--- a/spaces/ChenWu98/Stable-CycleDiffusion/app.py
+++ /dev/null
@@ -1,421 +0,0 @@
-from diffusers import CycleDiffusionPipeline, DDIMScheduler
-import os
-import gradio as gr
-import torch
-from PIL import Image
-import utils
-import ptp_utils
-import seq_aligner
-import torch.nn.functional as nnf
-from typing import Optional, Union, Tuple, List, Callable, Dict
-import abc
-
-LOW_RESOURCE = False
-MAX_NUM_WORDS = 77
-
-is_colab = utils.is_google_colab()
-colab_instruction = "" if is_colab else """
-
- Demo for CycleDiffusion with Stable Diffusion.
- CycleDiffusion (📄 Paper link | 🧨 Pipeline doc) is an image-to-image translation method that supports stochastic samplers for diffusion models.
- We also support the combination of CycleDiffusion and Cross Attention Control (CAC | 📄 Paper link). CAC is a technique to transfer the attention map from the source prompt to the target prompt.
-
-
- Quick start:
- 1. Click one row of Examples at the end of this page. It will fill all inputs needed.
- 2. Click the "Run CycleDiffusion" button.
-
-
- {colab_instruction}
- Running on {device_print}{(" in a Google Colab." if is_colab else "")}
-
- How to use:
- 1. Upload an image.
- 2. Enter the source and target prompts.
- 3. Select the source guidance scale (for "encoding") and the target guidance scale (for "decoding").
- 4. Select the strength (smaller strength means better content preservation).
- 5 (optional). Configure the Cross Attention Control options (e.g., CAC type, cross replace steps, self replace steps).
- 6 (optional). Configure other options (e.g., image size, inference steps, random seed).
- 7. Click the "Run CycleDiffusion" button.
-
-
- Notes:
- 1. CycleDiffusion is likely to fail when drastic changes are intended (e.g., changing a large black car to red).
- 2. The value of strength can be set larger when CAC is used.
- 3. If CAC type is "Replace", the source and target prompts should differ in only one token; otherwise, an error will be raised. This is why we deliberately make some grammar mistakes in Examples.
- 4. If CAC type is "Refine", the source prompt must be a subsequence of the target prompt; otherwise, an error will be raised.
-
-
- Runtimes:
- 1. 20s on A10G.
-
-
- """
- )
- with gr.Row():
-
- with gr.Column(scale=55):
- with gr.Group():
-
- img = gr.Image(label="Input image", height=512, tool="editor", type="pil")
-
- image_out = gr.Image(label="Output image", height=512)
- # gallery = gr.Gallery(
- # label="Generated images", show_label=False, elem_id="gallery"
- # ).style(grid=[1], height="auto")
-
- with gr.Column(scale=45):
- with gr.Tab("Edit options"):
- with gr.Group():
- with gr.Row():
- source_prompt = gr.Textbox(label="Source prompt", placeholder="Source prompt describes the input image")
- source_guidance_scale = gr.Slider(label="Source guidance scale", value=1, minimum=1, maximum=10)
- with gr.Row():
- target_prompt = gr.Textbox(label="Target prompt", placeholder="Target prompt describes the output image")
- guidance_scale = gr.Slider(label="Target guidance scale", value=5, minimum=1, maximum=10)
- with gr.Row():
- strength = gr.Slider(label="Strength", value=0.7, minimum=0.5, maximum=1, step=0.01)
- with gr.Row():
- generate1 = gr.Button(value="Run CycleDiffusion")
-
- with gr.Tab("CAC options"):
- with gr.Group():
- with gr.Row():
- cross_attention_control = gr.Radio(label="CAC type", choices=["None", "Replace", "Refine"], value="None")
- with gr.Row():
- # If not "None", the following two parameters will be used.
- cross_replace_steps = gr.Slider(label="Cross replace steps", value=0.8, minimum=0.0, maximum=1, step=0.01)
- self_replace_steps = gr.Slider(label="Self replace steps", value=0.4, minimum=0.0, maximum=1, step=0.01)
- with gr.Row():
- generate2 = gr.Button(value="Run CycleDiffusion")
-
- with gr.Tab("Other options"):
- with gr.Group():
- with gr.Row():
- num_inference_steps = gr.Slider(label="Inference steps", value=100, minimum=25, maximum=500, step=1)
- width = gr.Slider(label="Width", value=512, minimum=512, maximum=1024, step=8)
- height = gr.Slider(label="Height", value=512, minimum=512, maximum=1024, step=8)
-
- with gr.Row():
- seed = gr.Slider(0, 2147483647, label='Seed', value=0, step=1)
- with gr.Row():
- generate3 = gr.Button(value="Run CycleDiffusion")
-
- inputs = [source_prompt, target_prompt, source_guidance_scale, guidance_scale, num_inference_steps,
- width, height, seed, img, strength,
- cross_attention_control, cross_replace_steps, self_replace_steps]
- generate1.click(inference, inputs=inputs, outputs=image_out)
- generate2.click(inference, inputs=inputs, outputs=image_out)
- generate3.click(inference, inputs=inputs, outputs=image_out)
-
- ex = gr.Examples(
- [
- ["An astronaut riding a horse", "An astronaut riding an elephant", 1, 2, 100, 512, 512, 0, "images/astronaut_horse.png", 0.8, "None", 0, 0],
- ["An astronaut riding a horse", "An astronaut riding a elephant", 1, 2, 100, 512, 512, 0, "images/astronaut_horse.png", 0.9, "Replace", 0.15, 0.10],
- ["A black colored car.", "A blue colored car.", 1, 3, 100, 512, 512, 0, "images/black_car.png", 0.85, "None", 0, 0],
- ["A black colored car.", "A blue colored car.", 1, 5, 100, 512, 512, 0, "images/black_car.png", 0.95, "Replace", 0.8, 0.4],
- ["A black colored car.", "A red colored car.", 1, 5, 100, 512, 512, 0, "images/black_car.png", 1, "Replace", 0.8, 0.4],
- ["An aerial view of autumn scene.", "An aerial view of winter scene.", 1, 5, 100, 512, 512, 0, "images/mausoleum.png", 0.9, "None", 0, 0],
- ["An aerial view of autumn scene.", "An aerial view of winter scene.", 1, 5, 100, 512, 512, 0, "images/mausoleum.png", 1, "Replace", 0.8, 0.4],
- ["A green apple and a black backpack on the floor.", "A red apple and a black backpack on the floor.", 1, 7, 100, 512, 512, 0, "images/apple_bag.png", 0.9, "None", 0, 0],
- ["A green apple and a black backpack on the floor.", "A red apple and a black backpack on the floor.", 1, 7, 100, 512, 512, 0, "images/apple_bag.png", 0.9, "Replace", 0.8, 0.4],
- ["A hotel room with red flowers on the bed.", "A hotel room with a cat sitting on the bed.", 1, 4, 100, 512, 512, 0, "images/flower_hotel.png", 0.8, "None", 0, 0],
- ["A hotel room with red flowers on the bed.", "A hotel room with blue flowers on the bed.", 1, 5, 100, 512, 512, 0, "images/flower_hotel.png", 0.95, "None", 0, 0],
- ["A green apple and a black backpack on the floor.", "Two green apples and a black backpack on the floor.", 1, 5, 100, 512, 512, 0, "images/apple_bag.png", 0.89, "None", 0, 0],
- ],
- [source_prompt, target_prompt, source_guidance_scale, guidance_scale, num_inference_steps,
- width, height, seed, img, strength,
- cross_attention_control, cross_replace_steps, self_replace_steps],
- image_out, inference, cache_examples=True)
-
- gr.Markdown('''
- Space built with Diffusers 🧨 by HuggingFace 🤗.
- [](https://twitter.com/ChenHenryWu)
- 
- ''')
-
-if not is_colab:
- demo.queue(concurrency_count=1)
-demo.launch(debug=is_colab, share=is_colab)
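The Space above is a Gradio front end over `CycleDiffusionPipeline`. A rough sketch of the underlying call, assuming a diffusers release that still ships the pipeline; the exact scheduler-loading call and argument names may differ between versions, and the image path is illustrative.

```python
import torch
from PIL import Image
from diffusers import CycleDiffusionPipeline, DDIMScheduler

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

init_image = Image.open("black_car.png").convert("RGB").resize((512, 512))
result = pipe(
    prompt="A blue colored car.",          # target prompt ("decoding")
    source_prompt="A black colored car.",  # source prompt ("encoding")
    image=init_image,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    num_inference_steps=100,
)
result.images[0].save("blue_car.png")
```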
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/resolver.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/resolver.py
deleted file mode 100644
index 531ce93fccc2d3be442556de644cdc78d31d9c6e..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/resolver.py
+++ /dev/null
@@ -1,160 +0,0 @@
-import asyncio
-import socket
-from typing import Any, Dict, List, Optional, Type, Union
-
-from .abc import AbstractResolver
-from .helpers import get_running_loop
-
-__all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver")
-
-try:
- import aiodns
-
- # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
-except ImportError: # pragma: no cover
- aiodns = None
-
-aiodns_default = False
-
-
-class ThreadedResolver(AbstractResolver):
- """Threaded resolver.
-
- Uses an Executor for synchronous getaddrinfo() calls.
- concurrent.futures.ThreadPoolExecutor is used by default.
- """
-
- def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
- self._loop = get_running_loop(loop)
-
- async def resolve(
- self, hostname: str, port: int = 0, family: int = socket.AF_INET
- ) -> List[Dict[str, Any]]:
- infos = await self._loop.getaddrinfo(
- hostname,
- port,
- type=socket.SOCK_STREAM,
- family=family,
- flags=socket.AI_ADDRCONFIG,
- )
-
- hosts = []
- for family, _, proto, _, address in infos:
- if family == socket.AF_INET6:
- if len(address) < 3:
- # IPv6 is not supported by Python build,
- # or IPv6 is not enabled in the host
- continue
- if address[3]: # type: ignore[misc]
- # This is essential for link-local IPv6 addresses.
- # LL IPv6 is a VERY rare case. Strictly speaking, we should use
- # getnameinfo() unconditionally, but performance makes sense.
- host, _port = socket.getnameinfo(
- address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
- )
- port = int(_port)
- else:
- host, port = address[:2]
- else: # IPv4
- assert family == socket.AF_INET
- host, port = address # type: ignore[misc]
- hosts.append(
- {
- "hostname": hostname,
- "host": host,
- "port": port,
- "family": family,
- "proto": proto,
- "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
- }
- )
-
- return hosts
-
- async def close(self) -> None:
- pass
-
-
-class AsyncResolver(AbstractResolver):
- """Use the `aiodns` package to make asynchronous DNS lookups"""
-
- def __init__(
- self,
- loop: Optional[asyncio.AbstractEventLoop] = None,
- *args: Any,
- **kwargs: Any
- ) -> None:
- if aiodns is None:
- raise RuntimeError("Resolver requires aiodns library")
-
- self._loop = get_running_loop(loop)
- self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)
-
- if not hasattr(self._resolver, "gethostbyname"):
- # aiodns 1.1 is not available, fallback to DNSResolver.query
- self.resolve = self._resolve_with_query # type: ignore
-
- async def resolve(
- self, host: str, port: int = 0, family: int = socket.AF_INET
- ) -> List[Dict[str, Any]]:
- try:
- resp = await self._resolver.gethostbyname(host, family)
- except aiodns.error.DNSError as exc:
- msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
- raise OSError(msg) from exc
- hosts = []
- for address in resp.addresses:
- hosts.append(
- {
- "hostname": host,
- "host": address,
- "port": port,
- "family": family,
- "proto": 0,
- "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
- }
- )
-
- if not hosts:
- raise OSError("DNS lookup failed")
-
- return hosts
-
- async def _resolve_with_query(
- self, host: str, port: int = 0, family: int = socket.AF_INET
- ) -> List[Dict[str, Any]]:
- if family == socket.AF_INET6:
- qtype = "AAAA"
- else:
- qtype = "A"
-
- try:
- resp = await self._resolver.query(host, qtype)
- except aiodns.error.DNSError as exc:
- msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
- raise OSError(msg) from exc
-
- hosts = []
- for rr in resp:
- hosts.append(
- {
- "hostname": host,
- "host": rr.host,
- "port": port,
- "family": family,
- "proto": 0,
- "flags": socket.AI_NUMERICHOST,
- }
- )
-
- if not hosts:
- raise OSError("DNS lookup failed")
-
- return hosts
-
- async def close(self) -> None:
- self._resolver.cancel()
-
-
-_DefaultType = Type[Union[AsyncResolver, ThreadedResolver]]
-DefaultResolver: _DefaultType = AsyncResolver if aiodns_default else ThreadedResolver
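Either resolver plugs into an `aiohttp.TCPConnector`; `AsyncResolver` is only usable when `aiodns` is installed, which is why `DefaultResolver` falls back to `ThreadedResolver` above. A short usage sketch (the nameservers are illustrative):

```python
import asyncio

import aiohttp
from aiohttp.resolver import AsyncResolver

async def main():
    resolver = AsyncResolver(nameservers=["8.8.8.8", "1.1.1.1"])
    connector = aiohttp.TCPConnector(resolver=resolver)
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get("https://example.org") as resp:
            print(resp.status)

asyncio.run(main())
```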
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py
deleted file mode 100644
index 92ccd77a2de2e865e92c5e6943a66bdaff91f840..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import annotations
-
-from traceback import format_exception
-
-
-class BrokenResourceError(Exception):
- """
- Raised when trying to use a resource that has been rendered unusable due to external causes
- (e.g. a send stream whose peer has disconnected).
- """
-
-
-class BrokenWorkerProcess(Exception):
- """
- Raised by :func:`run_sync_in_process` if the worker process terminates abruptly or otherwise
- misbehaves.
- """
-
-
-class BusyResourceError(Exception):
- """Raised when two tasks are trying to read from or write to the same resource concurrently."""
-
- def __init__(self, action: str):
- super().__init__(f"Another task is already {action} this resource")
-
-
-class ClosedResourceError(Exception):
- """Raised when trying to use a resource that has been closed."""
-
-
-class DelimiterNotFound(Exception):
- """
- Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
- maximum number of bytes has been read without the delimiter being found.
- """
-
- def __init__(self, max_bytes: int) -> None:
- super().__init__(
- f"The delimiter was not found among the first {max_bytes} bytes"
- )
-
-
-class EndOfStream(Exception):
- """Raised when trying to read from a stream that has been closed from the other end."""
-
-
-class ExceptionGroup(BaseException):
- """
- Raised when multiple exceptions have been raised in a task group.
-
- :var ~typing.Sequence[BaseException] exceptions: the sequence of exceptions raised together
- """
-
- SEPARATOR = "----------------------------\n"
-
- exceptions: list[BaseException]
-
- def __str__(self) -> str:
- tracebacks = [
- "".join(format_exception(type(exc), exc, exc.__traceback__))
- for exc in self.exceptions
- ]
- return (
- f"{len(self.exceptions)} exceptions were raised in the task group:\n"
- f"{self.SEPARATOR}{self.SEPARATOR.join(tracebacks)}"
- )
-
- def __repr__(self) -> str:
- exception_reprs = ", ".join(repr(exc) for exc in self.exceptions)
- return f"<{self.__class__.__name__}: {exception_reprs}>"
-
-
-class IncompleteRead(Exception):
- """
- Raised during :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
- :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
- connection is closed before the requested amount of bytes has been read.
- """
-
- def __init__(self) -> None:
- super().__init__(
- "The stream was closed before the read operation could be completed"
- )
-
-
-class TypedAttributeLookupError(LookupError):
- """
- Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute is not
- found and no default value has been given.
- """
-
-
-class WouldBlock(Exception):
- """Raised by ``X_nowait`` functions if ``X()`` would block."""
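A small demonstration of two of these exceptions (`WouldBlock` and `EndOfStream`) using anyio's in-memory object streams:

```python
import anyio

async def main():
    send, receive = anyio.create_memory_object_stream(max_buffer_size=1)

    try:
        receive.receive_nowait()      # nothing buffered yet
    except anyio.WouldBlock:
        print("receive would block")

    await send.send("hello")
    print(await receive.receive())    # "hello"

    await send.aclose()
    try:
        await receive.receive()       # sender closed and buffer empty
    except anyio.EndOfStream:
        print("end of stream")

anyio.run(main)
```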
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/unicode.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/unicode.py
deleted file mode 100644
index a9ffeefac1c9e553c53bc12346e49e7ece8d364a..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/unicode.py
+++ /dev/null
@@ -1,50 +0,0 @@
-def _makeunicodes(f):
- lines = iter(f.readlines())
- unicodes = {}
- for line in lines:
- if not line:
- continue
- num, name = line.split(";")[:2]
- if name[0] == "<":
-            continue  # skip entries whose name starts with "<", e.g. "<control>"
- num = int(num, 16)
- unicodes[num] = name
- return unicodes
-
-
-class _UnicodeCustom(object):
- def __init__(self, f):
- if isinstance(f, str):
- with open(f) as fd:
- codes = _makeunicodes(fd)
- else:
- codes = _makeunicodes(f)
- self.codes = codes
-
- def __getitem__(self, charCode):
- try:
- return self.codes[charCode]
- except KeyError:
- return "????"
-
-
-class _UnicodeBuiltin(object):
- def __getitem__(self, charCode):
- try:
- # use unicodedata backport to python2, if available:
- # https://github.com/mikekap/unicodedata2
- import unicodedata2 as unicodedata
- except ImportError:
- import unicodedata
- try:
- return unicodedata.name(chr(charCode))
- except ValueError:
- return "????"
-
-
-Unicode = _UnicodeBuiltin()
-
-
-def setUnicodeData(f):
- global Unicode
- Unicode = _UnicodeCustom(f)
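Looking up character names through the module above; unassigned or non-character code points fall back to `"????"`:

```python
from fontTools.unicode import Unicode

print(Unicode[0x41])       # LATIN CAPITAL LETTER A
print(Unicode[0x1F600])    # GRINNING FACE
print(Unicode[0x10FFFE])   # ???? (noncharacter, no Unicode name)
```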
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/json_component.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/json_component.py
deleted file mode 100644
index bdd32c51febf8a7aaaa0fbab65d55c387e7c9576..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/json_component.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""gr.JSON() component."""
-
-from __future__ import annotations
-
-import json
-from typing import Any, Callable, Literal
-
-from gradio_client.documentation import document, set_documentation_group
-from gradio_client.serializing import JSONSerializable
-
-from gradio.components.base import IOComponent, _Keywords
-from gradio.deprecation import warn_style_method_deprecation
-from gradio.events import (
- Changeable,
-)
-
-set_documentation_group("component")
-
-
-@document()
-class JSON(Changeable, IOComponent, JSONSerializable):
- """
- Used to display arbitrary JSON output prettily.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {str} filepath to a file containing valid JSON -- or a {list} or {dict} that is valid JSON
-
- Demos: zip_to_json, blocks_xray
- """
-
- def __init__(
- self,
- value: str | dict | list | Callable | None = None,
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool | None = None,
- container: bool = True,
- scale: int | None = None,
- min_width: int = 160,
- visible: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default value. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- container: If True, will place the component in a container - providing some extra padding around the border.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- container=container,
- scale=scale,
- min_width=min_width,
- visible=visible,
- elem_id=elem_id,
- elem_classes=elem_classes,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- container: bool | None = None,
- scale: int | None = None,
- min_width: int | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "container": container,
- "scale": scale,
- "min_width": min_width,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def postprocess(self, y: dict | list | str | None) -> dict | list | None:
- """
- Parameters:
- y: either a string filepath to a JSON file, or a Python list or dict that can be converted to JSON
- Returns:
- JSON output in Python list or dict format
- """
- if y is None:
- return None
- if isinstance(y, str):
- return json.loads(y)
- else:
- return y
-
- def style(self, *, container: bool | None = None, **kwargs):
- """
- This method is deprecated. Please set these arguments in the constructor instead.
- """
- warn_style_method_deprecation()
- if container is not None:
- self.container = container
- return self
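Minimal use of the component above as an Interface output; `postprocess` accepts a dict, a list, or a path to a JSON file. Written against the gradio 3.x API this vendored copy comes from.

```python
import gradio as gr

def word_lengths(sentence: str) -> dict:
    return {word: len(word) for word in sentence.split()}

demo = gr.Interface(fn=word_lengths, inputs="text",
                    outputs=gr.JSON(label="Word lengths"))
demo.launch()
```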
diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/evalablate.py b/spaces/Dinoking/Guccio-AI-Designer/netdissect/evalablate.py
deleted file mode 100644
index 2079ffdb303b288df77678109f701e40fdf5779b..0000000000000000000000000000000000000000
--- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/evalablate.py
+++ /dev/null
@@ -1,248 +0,0 @@
-import torch, sys, os, argparse, textwrap, numbers, numpy, json, PIL
-from torchvision import transforms
-from torch.utils.data import TensorDataset
-from netdissect.progress import default_progress, post_progress, desc_progress
-from netdissect.progress import verbose_progress, print_progress
-from netdissect.nethook import edit_layers
-from netdissect.zdataset import standard_z_sample
-from netdissect.autoeval import autoimport_eval
-from netdissect.easydict import EasyDict
-from netdissect.modelconfig import create_instrumented_model
-
-help_epilog = '''\
-Example:
-
-python -m netdissect.evalablate \
- --segmenter "netdissect.segmenter.UnifiedParsingSegmenter(segsizes=[256], segdiv='quad')" \
- --model "proggan.from_pth_file('models/lsun_models/${SCENE}_lsun.pth')" \
- --outdir dissect/dissectdir \
- --classes mirror coffeetable tree \
- --layers layer4 \
- --size 1000
-
-Output layout:
-dissectdir/layer5/ablation/mirror-iqr.json
-{ class: "mirror",
- classnum: 43,
- pixel_total: 41342300,
- class_pixels: 1234531,
- layer: "layer5",
- ranking: "mirror-iqr",
-  ablation_units: [341, 23, 12, 142, 83, ...],
- ablation_pixels: [143242, 132344, 429931, ...]
-}
-
-'''
-
-def main():
- # Training settings
- def strpair(arg):
- p = tuple(arg.split(':'))
- if len(p) == 1:
- p = p + p
- return p
-
- parser = argparse.ArgumentParser(description='Ablation eval',
- epilog=textwrap.dedent(help_epilog),
- formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.add_argument('--model', type=str, default=None,
- help='constructor for the model to test')
- parser.add_argument('--pthfile', type=str, default=None,
- help='filename of .pth file for the model')
- parser.add_argument('--outdir', type=str, default='dissect', required=True,
- help='directory for dissection output')
- parser.add_argument('--layers', type=strpair, nargs='+',
- help='space-separated list of layer names to edit' +
- ', in the form layername[:reportedname]')
- parser.add_argument('--classes', type=str, nargs='+',
- help='space-separated list of class names to ablate')
- parser.add_argument('--metric', type=str, default='iou',
- help='ordering metric for selecting units')
- parser.add_argument('--unitcount', type=int, default=30,
- help='number of units to ablate')
-    parser.add_argument('--segmenter', type=str,
-                        help='constructor expression for the segmenter')
- parser.add_argument('--netname', type=str, default=None,
- help='name for network in generated reports')
- parser.add_argument('--batch_size', type=int, default=5,
- help='batch size for forward pass')
- parser.add_argument('--size', type=int, default=200,
- help='number of images to test')
- parser.add_argument('--no-cuda', action='store_true', default=False,
- help='disables CUDA usage')
- parser.add_argument('--quiet', action='store_true', default=False,
- help='silences console output')
- if len(sys.argv) == 1:
- parser.print_usage(sys.stderr)
- sys.exit(1)
- args = parser.parse_args()
-
- # Set up console output
- verbose_progress(not args.quiet)
-
- # Speed up pytorch
- torch.backends.cudnn.benchmark = True
-
- # Set up CUDA
- args.cuda = not args.no_cuda and torch.cuda.is_available()
- if args.cuda:
- torch.backends.cudnn.benchmark = True
-
- # Take defaults for model constructor etc from dissect.json settings.
- with open(os.path.join(args.outdir, 'dissect.json')) as f:
- dissection = EasyDict(json.load(f))
- if args.model is None:
- args.model = dissection.settings.model
- if args.pthfile is None:
- args.pthfile = dissection.settings.pthfile
- if args.segmenter is None:
- args.segmenter = dissection.settings.segmenter
-
- # Instantiate generator
- model = create_instrumented_model(args, gen=True, edit=True)
- if model is None:
- print('No model specified')
- sys.exit(1)
-
- # Instantiate model
- device = next(model.parameters()).device
- input_shape = model.input_shape
-
- # 4d input if convolutional, 2d input if first layer is linear.
- raw_sample = standard_z_sample(args.size, input_shape[1], seed=2).view(
- (args.size,) + input_shape[1:])
- dataset = TensorDataset(raw_sample)
-
- # Create the segmenter
- segmenter = autoimport_eval(args.segmenter)
-
- # Now do the actual work.
- labelnames, catnames = (
- segmenter.get_label_and_category_names(dataset))
- label_category = [catnames.index(c) if c in catnames else 0
- for l, c in labelnames]
- labelnum_from_name = {n[0]: i for i, n in enumerate(labelnames)}
-
- segloader = torch.utils.data.DataLoader(dataset,
- batch_size=args.batch_size, num_workers=10,
- pin_memory=(device.type == 'cuda'))
-
- # Index the dissection layers by layer name.
- dissect_layer = {lrec.layer: lrec for lrec in dissection.layers}
-
- # First, collect a baseline
- for l in model.ablation:
- model.ablation[l] = None
-
- # For each sort-order, do an ablation
- progress = default_progress()
- for classname in progress(args.classes):
- post_progress(c=classname)
- for layername in progress(model.ablation):
- post_progress(l=layername)
- rankname = '%s-%s' % (classname, args.metric)
- classnum = labelnum_from_name[classname]
- try:
- ranking = next(r for r in dissect_layer[layername].rankings
- if r.name == rankname)
-            except (KeyError, StopIteration):
- print('%s not found' % rankname)
- sys.exit(1)
- ordering = numpy.argsort(ranking.score)
- # Check if already done
- ablationdir = os.path.join(args.outdir, layername, 'pixablation')
- if os.path.isfile(os.path.join(ablationdir, '%s.json'%rankname)):
- with open(os.path.join(ablationdir, '%s.json'%rankname)) as f:
- data = EasyDict(json.load(f))
- # If the unit ordering is not the same, something is wrong
- if not all(a == o
- for a, o in zip(data.ablation_units, ordering)):
- continue
- if len(data.ablation_effects) >= args.unitcount:
- continue # file already done.
- measurements = data.ablation_effects
- measurements = measure_ablation(segmenter, segloader,
- model, classnum, layername, ordering[:args.unitcount])
- measurements = measurements.cpu().numpy().tolist()
- os.makedirs(ablationdir, exist_ok=True)
- with open(os.path.join(ablationdir, '%s.json'%rankname), 'w') as f:
- json.dump(dict(
- classname=classname,
- classnum=classnum,
- baseline=measurements[0],
- layer=layername,
- metric=args.metric,
- ablation_units=ordering.tolist(),
- ablation_effects=measurements[1:]), f)
-
-def measure_ablation(segmenter, loader, model, classnum, layer, ordering):
- total_bincount = 0
- data_size = 0
- device = next(model.parameters()).device
- progress = default_progress()
- for l in model.ablation:
- model.ablation[l] = None
- feature_units = model.feature_shape[layer][1]
- feature_shape = model.feature_shape[layer][2:]
- repeats = len(ordering)
- total_scores = torch.zeros(repeats + 1)
- for i, batch in enumerate(progress(loader)):
- z_batch = batch[0]
- model.ablation[layer] = None
- tensor_images = model(z_batch.to(device))
- seg = segmenter.segment_batch(tensor_images, downsample=2)
- mask = (seg == classnum).max(1)[0]
- downsampled_seg = torch.nn.functional.adaptive_avg_pool2d(
- mask.float()[:,None,:,:], feature_shape)[:,0,:,:]
- total_scores[0] += downsampled_seg.sum().cpu()
- # Now we need to do an intervention for every location
- # that had a nonzero downsampled_seg, if any.
- interventions_needed = downsampled_seg.nonzero()
- location_count = len(interventions_needed)
- if location_count == 0:
- continue
- interventions_needed = interventions_needed.repeat(repeats, 1)
- inter_z = batch[0][interventions_needed[:,0]].to(device)
- inter_chan = torch.zeros(repeats, location_count, feature_units,
- device=device)
- for j, u in enumerate(ordering):
- inter_chan[j:, :, u] = 1
- inter_chan = inter_chan.view(len(inter_z), feature_units)
- inter_loc = interventions_needed[:,1:]
- scores = torch.zeros(len(inter_z))
- batch_size = len(batch[0])
- for j in range(0, len(inter_z), batch_size):
- ibz = inter_z[j:j+batch_size]
- ibl = inter_loc[j:j+batch_size].t()
- imask = torch.zeros((len(ibz),) + feature_shape, device=ibz.device)
- imask[(torch.arange(len(ibz)),) + tuple(ibl)] = 1
- ibc = inter_chan[j:j+batch_size]
- model.ablation[layer] = (
- imask.float()[:,None,:,:] * ibc[:,:,None,None])
- tensor_images = model(ibz)
- seg = segmenter.segment_batch(tensor_images, downsample=2)
- mask = (seg == classnum).max(1)[0]
- downsampled_iseg = torch.nn.functional.adaptive_avg_pool2d(
- mask.float()[:,None,:,:], feature_shape)[:,0,:,:]
- scores[j:j+batch_size] = downsampled_iseg[
- (torch.arange(len(ibz)),) + tuple(ibl)]
- scores = scores.view(repeats, location_count).sum(1)
- total_scores[1:] += scores
- return total_scores
-
-def count_segments(segmenter, loader, model, num_classes):
-    # Average per-class pixel coverage over the loader; num_classes is the
-    # size of the segmenter's label space.
-    total_bincount = 0
-    data_size = 0
-    device = next(model.parameters()).device
-    progress = default_progress()
-    for i, batch in enumerate(progress(loader)):
-        z_batch = batch[0]
-        tensor_images = model(z_batch.to(device))
-        seg = segmenter.segment_batch(tensor_images, downsample=2)
-        # Offset each image's labels so one bincount covers the whole batch.
-        index = torch.arange(z_batch.shape[0], device=seg.device)
-        bc = (seg + index[:, None, None, None] * num_classes).view(-1
-            ).bincount(minlength=z_batch.shape[0] * num_classes)
-        batch_label_counts = bc.view(z_batch.shape[0], num_classes)
-        data_size += seg.shape[0] * seg.shape[2] * seg.shape[3]
-        total_bincount += batch_label_counts.float().sum(0)
-    normalized_bincount = total_bincount / data_size
-    return normalized_bincount
-
-if __name__ == '__main__':
- main()
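The per-class reports written by the script above follow the layout shown in `help_epilog`, with the ranking name `<class>-<metric>`. A sketch for reading one back:

```python
import json
import os

outdir, layer, classname, metric = "dissect/dissectdir", "layer4", "tree", "iou"
path = os.path.join(outdir, layer, "pixablation", f"{classname}-{metric}.json")

with open(path) as f:
    report = json.load(f)

print(report["baseline"])               # segmented pixels with no ablation
print(report["ablation_units"][:10])    # unit indices in ablation order
print(report["ablation_effects"][:10])  # pixels remaining after k ablations
```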
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/__init__.py b/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/__init__.py
deleted file mode 100644
index d0918d92285955855be89f00096b888ee5597ce3..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .fused_act import FusedLeakyReLU, fused_leaky_relu
-from .upfirdn2d import upfirdn2d
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/bias_act.h b/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/bias_act.h
deleted file mode 100644
index a32187e1fb7e3bae509d4eceaf900866866875a4..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/PTI/torch_utils/ops/bias_act.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-//------------------------------------------------------------------------
-// CUDA kernel parameters.
-
-struct bias_act_kernel_params
-{
- const void* x; // [sizeX]
- const void* b; // [sizeB] or NULL
- const void* xref; // [sizeX] or NULL
- const void* yref; // [sizeX] or NULL
- const void* dy; // [sizeX] or NULL
- void* y; // [sizeX]
-
- int grad;
- int act;
- float alpha;
- float gain;
- float clamp;
-
- int sizeX;
- int sizeB;
- int stepB;
- int loopX;
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel selection.
-
-template void* choose_bias_act_kernel(const bias_act_kernel_params& p);
-
-//------------------------------------------------------------------------
diff --git a/spaces/DragGan/DragGan/stylegan_human/pti/pti_configs/__init__.py b/spaces/DragGan/DragGan/stylegan_human/pti/pti_configs/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/DylanYan/WizardLM-WizardCoder-Python-34B-V1.0/README.md b/spaces/DylanYan/WizardLM-WizardCoder-Python-34B-V1.0/README.md
deleted file mode 100644
index 313e889c58707775cca3a10ccbb62cbca7159cb5..0000000000000000000000000000000000000000
--- a/spaces/DylanYan/WizardLM-WizardCoder-Python-34B-V1.0/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: WizardLM WizardCoder Python 34B V1.0
-emoji: 🌖
-colorFrom: green
-colorTo: purple
-sdk: gradio
-sdk_version: 3.41.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Falah/stablediffusionDB/README.md b/spaces/Falah/stablediffusionDB/README.md
deleted file mode 100644
index 615dc43280960c05eb2f4b1b4ed343813ed26d12..0000000000000000000000000000000000000000
--- a/spaces/Falah/stablediffusionDB/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: StablediffusionDB
-emoji: 🏢
-colorFrom: purple
-colorTo: purple
-sdk: gradio
-sdk_version: 3.33.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Fr33d0m21/Music_Splitter/app.py b/spaces/Fr33d0m21/Music_Splitter/app.py
deleted file mode 100644
index 67b0ad0943e927f88e28ebb1cef9bc0794d68250..0000000000000000000000000000000000000000
--- a/spaces/Fr33d0m21/Music_Splitter/app.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-import gradio as gr
-from scipy.io.wavfile import write
-
-
-def inference(audio):
- os.makedirs("out", exist_ok=True)
- write('test.wav', audio[0], audio[1])
- os.system("python3 -m demucs.separate -n mdx_extra_q -d cpu test.wav -o out")
- return "./out/mdx_extra_q/test/vocals.wav","./out/mdx_extra_q/test/bass.wav",\
-"./out/mdx_extra_q/test/drums.wav","./out/mdx_extra_q/test/other.wav"
-
-title = "Demucs"
-description = "Gradio demo for Demucs: Music Source Separation in the Waveform Domain. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below."
-article = "https://github.com/facebookresearch/demucs"
-
-examples=[['test.mp3']]
-gr.Interface(
- inference,
- gr.inputs.Audio(type="numpy", label="Input"),
- [gr.outputs.Audio(type="filepath", label="Vocals"),gr.outputs.Audio(type="filepath", label="Bass"),gr.outputs.Audio(type="filepath", label="Drums"),gr.outputs.Audio(type="filepath", label="Other")],
- title=title,
- description=description,
- article=article,
- examples=examples
- ).launch(enable_queue=True)
\ No newline at end of file
diff --git a/spaces/FridaZuley/RVC_HFKawaii/lib/infer_pack/modules/F0Predictor/__init__.py b/spaces/FridaZuley/RVC_HFKawaii/lib/infer_pack/modules/F0Predictor/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/GT4SD/regression_transformer/model_cards/regression_transformer_description.md b/spaces/GT4SD/regression_transformer/model_cards/regression_transformer_description.md
deleted file mode 100644
index 3582593b03d2b224902f1007ff393b8639a84cc0..0000000000000000000000000000000000000000
--- a/spaces/GT4SD/regression_transformer/model_cards/regression_transformer_description.md
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-
-
-### Concurrent sequence regression and generation for molecular language modeling
-
-The [Regression Transformer](https://www.nature.com/articles/s42256-023-00639-z) is a multitask Transformer that reformulates regression as a conditional sequence modeling task.
-This yields a dichotomous language model that seamlessly integrates property prediction with property-driven conditional generation. For details see the [*Nature Machine Intelligence* paper](https://www.nature.com/articles/s42256-023-00639-z), the [development code](https://github.com/IBM/regression-transformer) and the [GT4SD endpoint](https://github.com/GT4SD/gt4sd-core) for inference.
-
-Each `algorithm_version` refers to one trained model. Each model can be used for **two tasks**, either to *predict* one (or multiple) properties of a molecule or to *generate* a molecule (given a seed molecule and a property constraint).
-
-For **examples** and **documentation** of the model parameters, please see below.
-Moreover, we provide a **model card** ([Mitchell et al. (2019)](https://dl.acm.org/doi/abs/10.1145/3287560.3287596?casa_token=XD4eHiE2cRUAAAAA:NL11gMa1hGPOUKTAbtXnbVQBDBbjxwcjGECF_i-WC_3g1aBgU1Hbz_f2b4kI_m1in-w__1ztGeHnwHs)) at the bottom of this page.
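-
-To make the two usage modes concrete, below is a small illustrative sketch (ours, not part of this Space) that only mimics the general "property token + molecule" sequence layout described above. The exact token format, property names and masking scheme of the released models are assumptions here; rely on the GT4SD documentation for the real interface.
-
-```python
-# Illustrative sketch only: "<prop>value|SMILES" is an assumed, simplified
-# stand-in for the Regression Transformer's actual tokenization.
-def build_sequence(smiles, prop_name, prop_value=None):
-    # With a numeric value the property is given, so the molecule can be
-    # (partially) masked -> property-driven generation.
-    # With prop_value=None the property token itself is masked -> prediction.
-    if prop_value is not None:
-        prop_token = f"<{prop_name}>{prop_value:.3f}"
-    else:
-        prop_token = f"<{prop_name}>[MASK]"
-    return f"{prop_token}|{smiles}"
-
-print(build_sequence("CCO", "qed", 0.41))  # "<qed>0.410|CCO"
-print(build_sequence("CCO", "qed"))        # "<qed>[MASK]|CCO"
-```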
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py
deleted file mode 100644
index 9212dda4992b4d18cef9a4916b765ef37850237f..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
-model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scratch/README.md b/spaces/Gradio-Blocks/uniformer_image_detection/configs/scratch/README.md
deleted file mode 100644
index a338dc5d2c7c30a954b927d748afa3d7067542f4..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scratch/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Rethinking ImageNet Pre-training
-
-## Introduction
-
-[ALGORITHM]
-
-```latex
-@article{he2018rethinking,
- title={Rethinking imagenet pre-training},
- author={He, Kaiming and Girshick, Ross and Doll{\'a}r, Piotr},
- journal={arXiv preprint arXiv:1811.08883},
- year={2018}
-}
-```
-
-## Results and Models
-
-| Model | Backbone | Style | Lr schd | box AP | mask AP | Config | Download |
-|:------------:|:---------:|:-------:|:-------:|:------:|:-------:|:------:|:--------:|
-| Faster R-CNN | R-50-FPN | pytorch | 6x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_bbox_mAP-0.407_20200201_193013-90813d01.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/scratch/faster_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_faster_rcnn_r50_fpn_gn_6x_20200201_193013.log.json) |
-| Mask R-CNN | R-50-FPN | pytorch | 6x | 41.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_20200201_193051.log.json) |
-
-Note:
-
-- The above models are trained with 16 GPUs.
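-
-As a minimal sketch (ours, not part of the upstream README), the checkpoints listed above can be loaded for inference with MMDetection's high-level API. This assumes an MMDetection 2.x environment and that the script is run from the MMDetection repository root; the config path and checkpoint URL are taken from the table above, and the test image name is a placeholder.
-
-```python
-# Sketch: run the from-scratch Mask R-CNN checkpoint on a single image.
-from mmdet.apis import init_detector, inference_detector
-
-config_file = 'configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py'
-checkpoint = 'http://download.openmmlab.com/mmdetection/v2.0/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco/scratch_mask_rcnn_r50_fpn_gn_6x_bbox_mAP-0.412__segm_mAP-0.374_20200201_193051-1e190a40.pth'
-
-model = init_detector(config_file, checkpoint, device='cpu')
-result = inference_detector(model, 'demo.jpg')  # per-class bbox and mask results
-```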
diff --git a/spaces/Guilherme34/Jennifer-Llama270b-Chatbot-with-vision-v1/README.md b/spaces/Guilherme34/Jennifer-Llama270b-Chatbot-with-vision-v1/README.md
deleted file mode 100644
index 59ee58813cf50b168a5a21d905e5bd7cb3af648a..0000000000000000000000000000000000000000
--- a/spaces/Guilherme34/Jennifer-Llama270b-Chatbot-with-vision-v1/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Jennifer Llama270b Chatbot With Vision
-emoji: 🐠
-colorFrom: pink
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/HachiRe/Fusani/style.css b/spaces/HachiRe/Fusani/style.css
deleted file mode 100644
index df5fe434ac82e3cb87b3dafd48be11a0369bb4c0..0000000000000000000000000000000000000000
--- a/spaces/HachiRe/Fusani/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Ubuntu", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/adaptive_span/adaptive_span_model.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/adaptive_span/adaptive_span_model.py
deleted file mode 100644
index d96c95b85dbcf29e9384cc6d8d9630d2489991b2..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/adaptive_span/adaptive_span_model.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from fairseq.modules.layer_norm import LayerNorm
-
-from .adaptive_span_attention import AdaptiveSpan
-
-# Size notations:
-# B = batch_size, H = d_model, M = block_size, L = attn_span
-
-
-def _skew(X, pad_value):
- """shift every row 1 step to right"""
- # X = B x M x L
- B, M, L = X.size()
- X = F.pad(X, (0, M + 1), value=pad_value) # B x M x (L+M+1)
- X = X.view(B, -1) # B x ML+MM+M
- X = X[:, :-M] # B x ML+MM
- X = X.view(B, M, M + L) # B x M x L+M
- return X
-
-
-def _unskew(X):
- """reverse _skew operation"""
- # X = B x M x L+M
- B, M, L = X.size()
- L -= M
- X = X.view(B, -1) # B x ML+MM
- X = F.pad(X, (0, M)) # B x ML+MM+M
- X = X.view(B, M, M + L + 1) # B x M x L+M+1
- X = X[:, :, :L] # B x M x L
- return X
-
-
-class SeqAttention(nn.Module):
- """Sequential self-attention layer.
- Each token will attend to its previous fixed number of steps.
- Note that attention doesn't include the current step itself.
- """
-
- def __init__(self, d_model, n_head, attn_span, dropout, adapt_span_layer, **kargs):
- nn.Module.__init__(self)
- self.dropout = nn.Dropout(dropout)
- self.d_model = d_model # size of a single head
- self.attn_span = attn_span
- self.adaptive_span = AdaptiveSpan(
- attn_span=attn_span,
- n_head=n_head,
- adapt_span_layer=adapt_span_layer,
- **kargs
- )
-
- def forward(self, query, key, value, key_pe):
- # query size = B x M x H
- # key, value sizes = B x (M+L) x H
-
- key, value, key_pe = self.adaptive_span.trim_memory(query, key, value, key_pe)
-
- # compute attention from context
- # B x M (dest) x (M+L) (src)
- attn_cont = torch.matmul(query, key.transpose(-1, -2))
- attn_cont = _unskew(attn_cont) # B x M x L
-
- # compute the effect of position embedding
- attn_pos = torch.matmul(query, key_pe) # B x M x L_pos
- attn = attn_cont + attn_pos
-
- attn = attn / math.sqrt(self.d_model) # B x M X L_pos
-
- attn = F.softmax(attn.float(), dim=-1).type_as(attn)
-
- # trim attention lengths according to the learned span
- attn = self.adaptive_span(attn)
-
- attn = self.dropout(attn) # B x M X L_pos
-
- attn_cont = _skew(attn, 0) # B x M X (L+M)
- out = torch.matmul(attn_cont, value) # B x M x H
- return out
-
- def get_cache_size(self):
- return self.adaptive_span.get_cache_size()
-
-
-class MultiHeadSeqAttention(nn.Module):
- def __init__(self, d_model, n_head, **kargs):
- nn.Module.__init__(self)
- assert d_model % n_head == 0
- self.n_head = n_head
- self.head_dim = d_model // n_head
- self.attn = SeqAttention(d_model=self.head_dim, n_head=n_head, **kargs)
- self.proj_query = nn.Linear(d_model, d_model, bias=False)
- nn.init.xavier_normal_(self.proj_query.weight)
- self.proj_out = nn.Linear(d_model, d_model, bias=False)
- nn.init.xavier_normal_(self.proj_out.weight)
- self.proj_val = nn.Linear(d_model, d_model, bias=False)
- nn.init.xavier_normal_(self.proj_val.weight)
- self.proj_key = nn.Linear(d_model, d_model, bias=False)
- nn.init.xavier_normal_(self.proj_key.weight)
-
- def head_reshape(self, x):
- K = self.n_head
- D = self.head_dim
- x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D
- x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D
- x = x.view(-1, x.size(-2), x.size(-1)) # B_K x (M+L) x D
- return x
-
- def forward(self, query, key, value, key_pe):
- B = query.size(0)
- K = self.n_head
- D = self.head_dim
- M = query.size(1)
-
- query = self.proj_query(query)
- query = self.head_reshape(query)
- value = self.proj_val(value)
- value = self.head_reshape(value)
- key = self.proj_key(key)
- key = self.head_reshape(key)
-
- out = self.attn(query, key, value, key_pe) # B_K x M x D
- out = out.view(B, K, M, D) # B x K x M x D
- out = out.transpose(1, 2).contiguous() # B x M x K x D
- out = out.view(B, M, -1) # B x M x K_D
- out = self.proj_out(out)
- return out
-
-
-class FeedForwardLayer(nn.Module):
- def __init__(self, d_model, d_inner, dropout, **kargs):
- nn.Module.__init__(self)
- self.fc1 = nn.Linear(d_model, d_inner)
- self.fc2 = nn.Linear(d_inner, d_model)
- nn.init.xavier_uniform_(self.fc1.weight)
- nn.init.xavier_uniform_(self.fc2.weight)
- self.dropout = nn.Dropout(dropout)
-
- def forward(self, h):
- h1 = F.relu(self.fc1(h))
- h1 = self.dropout(h1)
- h2 = self.fc2(h1)
- return h2
-
-
-class TransformerSeqLayer(nn.Module):
- def __init__(self, d_model, **kargs):
- nn.Module.__init__(self)
- self.attn = MultiHeadSeqAttention(d_model=d_model, **kargs)
- self.norm1 = LayerNorm(d_model)
- self.ff = FeedForwardLayer(d_model=d_model, **kargs)
- self.norm2 = LayerNorm(d_model)
-
- def forward(self, h, h_cache, key_pe):
- # h = B x M x H
- # h_cache = B x L x H
- h_all = torch.cat([h_cache, h], dim=1) # B x (M+L) x H
- attn_out = self.attn(h, h_all, h_all, key_pe)
- h = self.norm1(h + attn_out) # B x M x H
- if self.ff is not None:
- ff_out = self.ff(h)
- out = self.norm2(h + ff_out) # B x M x H
- else:
- out = h
- return out
-
- def get_cache_size(self):
- return self.attn.attn.get_cache_size()
-
-
-class TransformerSeq(nn.Module):
- def __init__(
- self,
- vocab_size,
- d_model,
- n_head,
- n_layer,
- attn_span,
- emb_dropout,
- aux_loss_scaler,
- adapt_span_layer,
- **kargs
- ):
- nn.Module.__init__(self)
- # token embeddings
- self.in_emb = nn.Embedding(vocab_size, d_model)
- nn.init.normal_(self.in_emb.weight, mean=0, std=d_model ** -0.5)
- self.out_emb = nn.Linear(d_model, vocab_size)
- self.aux_loss_scaler = aux_loss_scaler
- if emb_dropout > 0:
- self.emb_dropout = nn.Dropout(emb_dropout)
- else:
- self.emb_dropout = None
- # position embeddings
- self.key_pe = nn.Parameter(torch.randn(1, d_model // n_head, attn_span))
-
- self.layers = nn.ModuleList()
- self.layers.extend(
- TransformerSeqLayer(
- d_model=d_model,
- n_head=n_head,
- attn_span=attn_span,
- adapt_span_layer=adapt_span_layer,
- **kargs
- )
- for _ in range(n_layer)
- )
-
- def forward(self, x, h_cache, target=None):
- # x size = B x M
- block_size = x.size(1)
- h = self.in_emb(x) # B x M x H
- if self.emb_dropout is not None:
- h = self.emb_dropout(h)
-
- h_cache_next = []
- for l, layer in enumerate(self.layers):
- cache_size = layer.attn.attn.get_cache_size()
- if cache_size > block_size:
- h_cache_next_l = torch.cat(
- [h_cache[l][:, -cache_size + block_size :, :], h], dim=1
- ).detach()
- else:
- h_cache_next_l = h[:, -cache_size:, :].detach()
- h_cache_next.append(h_cache_next_l)
- h = layer(h, h_cache[l], self.key_pe) # B x M x H
-
- if self.emb_dropout is not None:
- h = self.emb_dropout(h)
-
- out = F.log_softmax(self.out_emb(h).float(), dim=-1).type_as(h)
- dummy_loss = None
-
- return out, h_cache_next, dummy_loss
-
- def get_aux_loss(self):
- loss = 0.0
- for layer in self.layers:
- loss += layer.attn.attn.adaptive_span.get_loss()
- return self.aux_loss_scaler * loss
-
- def get_current_max_span(self):
- max_span = 0.0
- for layer in self.layers:
- max_span = max(
- max_span, layer.attn.attn.adaptive_span.get_current_max_span()
- )
- return max_span
-
- def get_current_avg_span(self):
- avg_span = 0.0
- for layer in self.layers:
- avg_span += layer.attn.attn.adaptive_span.get_current_avg_span()
- return avg_span / len(self.layers)
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/new/decoders/flashlight_decoder.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/new/decoders/flashlight_decoder.py
deleted file mode 100644
index 38c7ac492f390a367a64769d7a72fe228df097c7..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_recognition/new/decoders/flashlight_decoder.py
+++ /dev/null
@@ -1,431 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import gc
-import os.path as osp
-import warnings
-from collections import deque, namedtuple
-from typing import Any, Dict, Tuple
-
-import numpy as np
-import torch
-from fairseq import tasks
-from fairseq.data.dictionary import Dictionary
-from fairseq.dataclass.utils import convert_namespace_to_omegaconf
-from fairseq.models.fairseq_model import FairseqModel
-from fairseq.utils import apply_to_sample
-from omegaconf import open_dict, OmegaConf
-
-from typing import List
-
-from .decoder_config import FlashlightDecoderConfig
-from .base_decoder import BaseDecoder
-
-try:
- from flashlight.lib.text.decoder import (
- LM,
- CriterionType,
- DecodeResult,
- KenLM,
- LexiconDecoder,
- LexiconDecoderOptions,
- LexiconFreeDecoder,
- LexiconFreeDecoderOptions,
- LMState,
- SmearingMode,
- Trie,
- )
- from flashlight.lib.text.dictionary import create_word_dict, load_words
-except ImportError:
- warnings.warn(
- "flashlight python bindings are required to use this functionality. "
- "Please install from "
- "https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
- )
- LM = object
- LMState = object
-
-
-class KenLMDecoder(BaseDecoder):
- def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
- super().__init__(tgt_dict)
-
- self.nbest = cfg.nbest
- self.unitlm = cfg.unitlm
-
- if cfg.lexicon:
- self.lexicon = load_words(cfg.lexicon)
- self.word_dict = create_word_dict(self.lexicon)
- self.unk_word = self.word_dict.get_index("<unk>")
-
- self.lm = KenLM(cfg.lmpath, self.word_dict)
- self.trie = Trie(self.vocab_size, self.silence)
-
- start_state = self.lm.start(False)
- for word, spellings in self.lexicon.items():
- word_idx = self.word_dict.get_index(word)
- _, score = self.lm.score(start_state, word_idx)
- for spelling in spellings:
- spelling_idxs = [tgt_dict.index(token) for token in spelling]
- assert (
- tgt_dict.unk() not in spelling_idxs
- ), f"{word} {spelling} {spelling_idxs}"
- self.trie.insert(spelling_idxs, word_idx, score)
- self.trie.smear(SmearingMode.MAX)
-
- self.decoder_opts = LexiconDecoderOptions(
- beam_size=cfg.beam,
- beam_size_token=cfg.beamsizetoken or len(tgt_dict),
- beam_threshold=cfg.beamthreshold,
- lm_weight=cfg.lmweight,
- word_score=cfg.wordscore,
- unk_score=cfg.unkweight,
- sil_score=cfg.silweight,
- log_add=False,
- criterion_type=CriterionType.CTC,
- )
-
- self.decoder = LexiconDecoder(
- self.decoder_opts,
- self.trie,
- self.lm,
- self.silence,
- self.blank,
- self.unk_word,
- [],
- self.unitlm,
- )
- else:
- assert self.unitlm, "Lexicon-free decoding requires unit LM"
-
- d = {w: [[w]] for w in tgt_dict.symbols}
- self.word_dict = create_word_dict(d)
- self.lm = KenLM(cfg.lmpath, self.word_dict)
- self.decoder_opts = LexiconFreeDecoderOptions(
- beam_size=cfg.beam,
- beam_size_token=cfg.beamsizetoken or len(tgt_dict),
- beam_threshold=cfg.beamthreshold,
- lm_weight=cfg.lmweight,
- sil_score=cfg.silweight,
- log_add=False,
- criterion_type=CriterionType.CTC,
- )
- self.decoder = LexiconFreeDecoder(
- self.decoder_opts, self.lm, self.silence, self.blank, []
- )
-
- def get_timesteps(self, token_idxs: List[int]) -> List[int]:
- """Returns frame numbers corresponding to every non-blank token.
-
- Parameters
- ----------
- token_idxs : List[int]
- IDs of decoded tokens.
-
- Returns
- -------
- List[int]
- Frame numbers corresponding to every non-blank token.
- """
- timesteps = []
- for i, token_idx in enumerate(token_idxs):
- if token_idx == self.blank:
- continue
- if i == 0 or token_idx != token_idxs[i-1]:
- timesteps.append(i)
- return timesteps
-
- def decode(
- self,
- emissions: torch.FloatTensor,
- ) -> List[List[Dict[str, torch.LongTensor]]]:
- B, T, N = emissions.size()
- hypos = []
- for b in range(B):
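- # emissions are float32 (4 bytes per element); offset the raw data pointer to batch item b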
- emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
- results = self.decoder.decode(emissions_ptr, T, N)
-
- nbest_results = results[: self.nbest]
- hypos.append(
- [
- {
- "tokens": self.get_tokens(result.tokens),
- "score": result.score,
- "timesteps": self.get_timesteps(result.tokens),
- "words": [
- self.word_dict.get_entry(x) for x in result.words if x >= 0
- ],
- }
- for result in nbest_results
- ]
- )
- return hypos
-
-
-FairseqLMState = namedtuple(
- "FairseqLMState",
- [
- "prefix",
- "incremental_state",
- "probs",
- ],
-)
-
-
-class FairseqLM(LM):
- def __init__(self, dictionary: Dictionary, model: FairseqModel) -> None:
- super().__init__()
-
- self.dictionary = dictionary
- self.model = model
- self.unk = self.dictionary.unk()
-
- self.save_incremental = False # this currently does not work properly
- self.max_cache = 20_000
-
- if torch.cuda.is_available():
- model.cuda()
- model.eval()
- model.make_generation_fast_()
-
- self.states = {}
- self.stateq = deque()
-
- def start(self, start_with_nothing: bool) -> LMState:
- state = LMState()
- prefix = torch.LongTensor([[self.dictionary.eos()]])
- incremental_state = {} if self.save_incremental else None
- with torch.no_grad():
- res = self.model(prefix.cuda(), incremental_state=incremental_state)
- probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
-
- if incremental_state is not None:
- incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
- self.states[state] = FairseqLMState(
- prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
- )
- self.stateq.append(state)
-
- return state
-
- def score(
- self,
- state: LMState,
- token_index: int,
- no_cache: bool = False,
- ) -> Tuple[LMState, float]:
- """
- Evaluate language model based on the current lm state and new word
- Parameters:
- -----------
- state: current lm state
- token_index: index of the word
- (this can be a lexicon index, in which case the LM must store the mapping
- between lexicon indices and LM indices, or directly the LM index of a word)
- Returns:
- --------
- (LMState, float): pair of (new state, score for the current word)
- """
- curr_state = self.states[state]
-
- def trim_cache(targ_size: int) -> None:
- while len(self.stateq) > targ_size:
- rem_k = self.stateq.popleft()
- rem_st = self.states[rem_k]
- rem_st = FairseqLMState(rem_st.prefix, None, None)
- self.states[rem_k] = rem_st
-
- if curr_state.probs is None:
- new_incremental_state = (
- curr_state.incremental_state.copy()
- if curr_state.incremental_state is not None
- else None
- )
- with torch.no_grad():
- if new_incremental_state is not None:
- new_incremental_state = apply_to_sample(
- lambda x: x.cuda(), new_incremental_state
- )
- elif self.save_incremental:
- new_incremental_state = {}
-
- res = self.model(
- torch.from_numpy(curr_state.prefix).cuda(),
- incremental_state=new_incremental_state,
- )
- probs = self.model.get_normalized_probs(
- res, log_probs=True, sample=None
- )
-
- if new_incremental_state is not None:
- new_incremental_state = apply_to_sample(
- lambda x: x.cpu(), new_incremental_state
- )
-
- curr_state = FairseqLMState(
- curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
- )
-
- if not no_cache:
- self.states[state] = curr_state
- self.stateq.append(state)
-
- score = curr_state.probs[token_index].item()
-
- trim_cache(self.max_cache)
-
- outstate = state.child(token_index)
- if outstate not in self.states and not no_cache:
- prefix = np.concatenate(
- [curr_state.prefix, torch.LongTensor([[token_index]])], -1
- )
- incr_state = curr_state.incremental_state
-
- self.states[outstate] = FairseqLMState(prefix, incr_state, None)
-
- if token_index == self.unk:
- score = float("-inf")
-
- return outstate, score
-
- def finish(self, state: LMState) -> Tuple[LMState, float]:
- """
- Evaluate eos for language model based on the current lm state
- Returns:
- --------
- (LMState, float): pair of (new state, score for the current word)
- """
- return self.score(state, self.dictionary.eos())
-
- def empty_cache(self) -> None:
- self.states = {}
- self.stateq = deque()
- gc.collect()
-
-
-class FairseqLMDecoder(BaseDecoder):
- def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None:
- super().__init__(tgt_dict)
-
- self.nbest = cfg.nbest
- self.unitlm = cfg.unitlm
-
- self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None
- self.idx_to_wrd = {}
-
- checkpoint = torch.load(cfg.lmpath, map_location="cpu")
-
- if "cfg" in checkpoint and checkpoint["cfg"] is not None:
- lm_args = checkpoint["cfg"]
- else:
- lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
-
- if not OmegaConf.is_dict(lm_args):
- lm_args = OmegaConf.create(lm_args)
-
- with open_dict(lm_args.task):
- lm_args.task.data = osp.dirname(cfg.lmpath)
-
- task = tasks.setup_task(lm_args.task)
- model = task.build_model(lm_args.model)
- model.load_state_dict(checkpoint["model"], strict=False)
-
- self.trie = Trie(self.vocab_size, self.silence)
-
- self.word_dict = task.dictionary
- self.unk_word = self.word_dict.unk()
- self.lm = FairseqLM(self.word_dict, model)
-
- if self.lexicon:
- start_state = self.lm.start(False)
- for i, (word, spellings) in enumerate(self.lexicon.items()):
- if self.unitlm:
- word_idx = i
- self.idx_to_wrd[i] = word
- score = 0
- else:
- word_idx = self.word_dict.index(word)
- _, score = self.lm.score(start_state, word_idx, no_cache=True)
-
- for spelling in spellings:
- spelling_idxs = [tgt_dict.index(token) for token in spelling]
- assert (
- tgt_dict.unk() not in spelling_idxs
- ), f"{spelling} {spelling_idxs}"
- self.trie.insert(spelling_idxs, word_idx, score)
- self.trie.smear(SmearingMode.MAX)
-
- self.decoder_opts = LexiconDecoderOptions(
- beam_size=cfg.beam,
- beam_size_token=cfg.beamsizetoken or len(tgt_dict),
- beam_threshold=cfg.beamthreshold,
- lm_weight=cfg.lmweight,
- word_score=cfg.wordscore,
- unk_score=cfg.unkweight,
- sil_score=cfg.silweight,
- log_add=False,
- criterion_type=CriterionType.CTC,
- )
-
- self.decoder = LexiconDecoder(
- self.decoder_opts,
- self.trie,
- self.lm,
- self.silence,
- self.blank,
- self.unk_word,
- [],
- self.unitlm,
- )
- else:
- assert self.unitlm, "Lexicon-free decoding requires unit LM"
-
- d = {w: [[w]] for w in tgt_dict.symbols}
- self.word_dict = create_word_dict(d)
- self.lm = KenLM(cfg.lmpath, self.word_dict)
- self.decoder_opts = LexiconFreeDecoderOptions(
- beam_size=cfg.beam,
- beam_size_token=cfg.beamsizetoken or len(tgt_dict),
- beam_threshold=cfg.beamthreshold,
- lm_weight=cfg.lmweight,
- sil_score=cfg.silweight,
- log_add=False,
- criterion_type=CriterionType.CTC,
- )
- self.decoder = LexiconFreeDecoder(
- self.decoder_opts, self.lm, self.silence, self.blank, []
- )
-
- def decode(
- self,
- emissions: torch.FloatTensor,
- ) -> List[List[Dict[str, torch.LongTensor]]]:
- B, T, N = emissions.size()
- hypos = []
-
- def make_hypo(result: DecodeResult) -> Dict[str, Any]:
- hypo = {
- "tokens": self.get_tokens(result.tokens),
- "score": result.score,
- }
- if self.lexicon:
- hypo["words"] = [
- self.idx_to_wrd[x] if self.unitlm else self.word_dict[x]
- for x in result.words
- if x >= 0
- ]
- return hypo
-
- for b in range(B):
- emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
- results = self.decoder.decode(emissions_ptr, T, N)
-
- nbest_results = results[: self.nbest]
- hypos.append([make_hypo(result) for result in nbest_results])
- self.lm.empty_cache()
-
- return hypos
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libnat_cuda/binding.cpp b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libnat_cuda/binding.cpp
deleted file mode 100644
index ced91c0d0afab9071842911d9876e6360d90284a..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libnat_cuda/binding.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Copyright 2017-present, Facebook, Inc.
- * All rights reserved.
- *
- * This source code is licensed under the license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-/*
- This code is partially adpoted from
- https://github.com/1ytic/pytorch-edit-distance
- */
-
-#include <torch/extension.h>
-#include "edit_dist.h"
-
-#ifndef TORCH_CHECK
-#define TORCH_CHECK AT_CHECK
-#endif
-
-#define CHECK_CUDA(x) \
- TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
-#define CHECK_CONTIGUOUS(x) \
- TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
-#define CHECK_INPUT(x) \
- CHECK_CUDA(x); \
- CHECK_CONTIGUOUS(x)
-
-torch::Tensor LevenshteinDistance(
- torch::Tensor source,
- torch::Tensor target,
- torch::Tensor source_length,
- torch::Tensor target_length) {
- CHECK_INPUT(source);
- CHECK_INPUT(target);
- CHECK_INPUT(source_length);
- CHECK_INPUT(target_length);
- return LevenshteinDistanceCuda(source, target, source_length, target_length);
-}
-
-torch::Tensor GenerateDeletionLabel(
- torch::Tensor source,
- torch::Tensor operations) {
- CHECK_INPUT(source);
- CHECK_INPUT(operations);
- return GenerateDeletionLabelCuda(source, operations);
-}
-
-std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabel(
- torch::Tensor target,
- torch::Tensor operations) {
- CHECK_INPUT(target);
- CHECK_INPUT(operations);
- return GenerateInsertionLabelCuda(target, operations);
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def("levenshtein_distance", &LevenshteinDistance, "Levenshtein distance");
- m.def(
- "generate_deletion_labels",
- &GenerateDeletionLabel,
- "Generate Deletion Label");
- m.def(
- "generate_insertion_labels",
- &GenerateInsertionLabel,
- "Generate Insertion Label");
-}
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_denoising.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_denoising.py
deleted file mode 100644
index d1c914917feb5165aad7482cd1377f5f65b21635..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_denoising.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-
-import numpy as np
-from fairseq.data import (
- AppendTokenDataset,
- ConcatDataset,
- DenoisingDataset,
- Dictionary,
- PrependTokenDataset,
- ResamplingDataset,
- SortDataset,
- TokenBlockDataset,
- data_utils,
-)
-from fairseq.data.encoders.utils import get_whole_word_mask
-from fairseq.tasks import register_task
-
-from .denoising import DenoisingTask
-
-
-logger = logging.getLogger(__name__)
-
-
-@register_task("multilingual_denoising")
-class MultilingualDenoisingTask(DenoisingTask):
- @staticmethod
- def add_args(parser):
- DenoisingTask.add_args(parser)
- parser.add_argument(
- "--multilang-sampling-alpha",
- type=float,
- default=1.0,
- help="smoothing alpha for sample ratios across multiple datasets",
- )
- parser.add_argument("--add-lang-token", default=False, action="store_true")
- parser.add_argument(
- "--langs", type=str, help="language ids we are considering", default=None
- )
- parser.add_argument(
- "--no-whole-word-mask-langs",
- type=str,
- default="",
- metavar="N",
- help="languages without spacing between words dont support whole word masking",
- )
-
- @classmethod
- def setup_task(cls, args, **kwargs):
- """Setup the task."""
- paths = args.data.split(":")
- assert len(paths) > 0
- dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
-
- data_path = paths[0]
- if args.langs is None:
- languages = sorted(
- [
- name
- for name in os.listdir(data_path)
- if os.path.isdir(os.path.join(data_path, name))
- ]
- )
- else:
- languages = args.langs.split(",")
-
- if args.add_lang_token:
- for lang in languages:
- dictionary.add_symbol("[{}]".format(lang))
-
- logger.info("dictionary: {} types".format(len(dictionary)))
- if not hasattr(args, "shuffle_instance"):
- args.shuffle_instance = False
- return cls(args, dictionary)
-
- def __init__(self, args, dictionary):
- super().__init__(args, dictionary)
- self.dictionary = dictionary
- self.seed = args.seed
-
- # add mask token
- self.mask_idx = self.dictionary.add_symbol("<mask>")
- self.langs = args.langs
- self.args = args
-
- def _get_sample_prob(self, dataset_lens):
- """
- Get smoothed sampling probability by languages. This helps low resource
- languages by upsampling them.
- """
- prob = dataset_lens / dataset_lens.sum()
- smoothed_prob = prob ** self.args.multilang_sampling_alpha
- smoothed_prob = smoothed_prob / smoothed_prob.sum()
- return smoothed_prob
-
- def load_dataset(self, split, epoch=1, combine=False, **kwargs):
- """Load a given dataset split.
-
- Args:
- split (str): name of the split (e.g., train, valid, test)
- """
- paths = self.args.data.split(":")
- assert len(paths) > 0
- data_path = paths[(epoch - 1) % len(paths)]
- split_path = os.path.join(data_path, split)
-
- if self.langs is None:
- languages = sorted(
- [
- name
- for name in os.listdir(data_path)
- if os.path.isdir(os.path.join(data_path, name))
- ]
- )
- else:
- languages = self.langs.split(",")
- for name in languages:
- p = os.path.join(data_path, name)
- assert os.path.exists(p), "data not found: {}".format(p)
-
- logger.info("Training on {0} languages: {1}".format(len(languages), languages))
- logger.info(
- "Language to id mapping: ", {lang: id for id, lang in enumerate(languages)}
- )
-
- mask_whole_words = get_whole_word_mask(self.args, self.dictionary)
- language_without_segmentations = self.args.no_whole_word_mask_langs.split(",")
- lang_datasets = []
- for language in languages:
- split_path = os.path.join(data_path, language, split)
-
- dataset = data_utils.load_indexed_dataset(
- split_path,
- self.source_dictionary,
- self.args.dataset_impl,
- combine=combine,
- )
- if dataset is None:
- raise FileNotFoundError(
- "Dataset not found: {} ({})".format(split, split_path)
- )
-
- end_token = (
- self.source_dictionary.index("[{}]".format(language))
- if self.args.add_lang_token
- else self.source_dictionary.eos()
- )
-
- # create continuous blocks of tokens
- dataset = TokenBlockDataset(
- dataset,
- dataset.sizes,
- self.args.tokens_per_sample - 2, # minus 2: room for the prepended <s> and the appended eos/language token
- pad=self.source_dictionary.pad(),
- eos=end_token,
- break_mode=self.args.sample_break_mode,
- )
- logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
-
- # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
- dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
- dataset = AppendTokenDataset(dataset, end_token)
-
- lang_mask_whole_words = (
- mask_whole_words
- if language not in language_without_segmentations
- else None
- )
- lang_dataset = DenoisingDataset(
- dataset,
- dataset.sizes,
- self.dictionary,
- self.mask_idx,
- lang_mask_whole_words,
- shuffle=self.args.shuffle_instance,
- seed=self.seed,
- args=self.args,
- eos=None
- if not self.args.add_lang_token
- else self.source_dictionary.index("[{}]".format(language)),
- )
- lang_datasets.append(lang_dataset)
-
- dataset_lengths = np.array(
- [len(d) for d in lang_datasets],
- dtype=float,
- )
- logger.info(
- "loaded total {} blocks for all languages".format(
- int(dataset_lengths.sum()),
- )
- )
- if split == self.args.train_subset:
- # For train subset, additionally up or down sample languages.
- sample_probs = self._get_sample_prob(dataset_lengths)
- logger.info(
- "Sample probability by language: {}".format(
- {
- lang: "{0:.4f}".format(sample_probs[id])
- for id, lang in enumerate(languages)
- }
- )
- )
- size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
- logger.info(
- "Up/Down Sampling ratio by language: {}".format(
- {
- lang: "{0:.2f}".format(size_ratio[id])
- for id, lang in enumerate(languages)
- }
- )
- )
-
- resampled_lang_datasets = [
- ResamplingDataset(
- lang_datasets[i],
- size_ratio=size_ratio[i],
- seed=self.args.seed,
- epoch=epoch,
- replace=size_ratio[i] >= 1.0,
- )
- for i, d in enumerate(lang_datasets)
- ]
- dataset = ConcatDataset(
- resampled_lang_datasets,
- )
- else:
- dataset = ConcatDataset(lang_datasets)
- lang_splits = [split]
- for lang_id, lang_dataset in enumerate(lang_datasets):
- split_name = split + "_" + languages[lang_id]
- lang_splits.append(split_name)
- self.datasets[split_name] = lang_dataset
-
- if split in self.args.valid_subset:
- self.args.valid_subset = self.args.valid_subset.replace(
- split, ",".join(lang_splits)
- )
-
- with data_utils.numpy_seed(self.args.seed + epoch):
- shuffle = np.random.permutation(len(dataset))
-
- self.datasets[split] = SortDataset(
- dataset,
- sort_order=[
- shuffle,
- dataset.sizes,
- ],
- )
diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/hifi_gan/env.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/hifi_gan/env.py
deleted file mode 100644
index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/hifi_gan/env.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import shutil
-
-
-class AttrDict(dict):
- def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
- self.__dict__ = self
-
-
-def build_env(config, config_name, path):
- t_path = os.path.join(path, config_name)
- if config != t_path:
- os.makedirs(path, exist_ok=True)
- shutil.copyfile(config, os.path.join(path, config_name))
diff --git a/spaces/HgMenon/Transcribe_V0.2/src/config.py b/spaces/HgMenon/Transcribe_V0.2/src/config.py
deleted file mode 100644
index 333054c4d4d645f09cfbc31654199f97b530cbb4..0000000000000000000000000000000000000000
--- a/spaces/HgMenon/Transcribe_V0.2/src/config.py
+++ /dev/null
@@ -1,147 +0,0 @@
-from enum import Enum
-import urllib
-
-import os
-from typing import List
-from urllib.parse import urlparse
-import json5
-import torch
-
-from tqdm import tqdm
-
-class ModelConfig:
- def __init__(self, name: str, url: str, path: str = None, type: str = "whisper"):
- """
- Initialize a model configuration.
-
- name: Name of the model
- url: URL to download the model from
- path: Path to the model file. If not set, the model will be downloaded from the URL.
- type: Type of model. Can be whisper or huggingface.
- """
- self.name = name
- self.url = url
- self.path = path
- self.type = type
-
-class VadInitialPromptMode(Enum):
- PREPEND_ALL_SEGMENTS = 1
- PREPREND_FIRST_SEGMENT = 2
-
- @staticmethod
- def from_string(s: str):
- normalized = s.lower() if s is not None else None
-
- if normalized == "prepend_all_segments":
- return VadInitialPromptMode.PREPEND_ALL_SEGMENTS
- elif normalized == "prepend_first_segment":
- return VadInitialPromptMode.PREPREND_FIRST_SEGMENT
- else:
- raise ValueError(f"Invalid value for VadInitialPromptMode: {s}")
-
-class ApplicationConfig:
- def __init__(self, models: List[ModelConfig] = [], input_audio_max_duration: int = 600,
- share: bool = False, server_name: str = None, server_port: int = 7860,
- queue_concurrency_count: int = 1, delete_uploaded_files: bool = True,
- whisper_implementation: str = "whisper",
- default_model_name: str = "medium", default_vad: str = "silero-vad",
- vad_parallel_devices: str = "", vad_cpu_cores: int = 1, vad_process_timeout: int = 1800,
- auto_parallel: bool = False, output_dir: str = None,
- model_dir: str = None, device: str = None,
- verbose: bool = True, task: str = "transcribe", language: str = None,
- vad_initial_prompt_mode: str = "prepend_first_segment",
- vad_merge_window: float = 5, vad_max_merge_size: float = 30,
- vad_padding: float = 1, vad_prompt_window: float = 3,
- temperature: float = 0, best_of: int = 5, beam_size: int = 5,
- patience: float = None, length_penalty: float = None,
- suppress_tokens: str = "-1", initial_prompt: str = None,
- condition_on_previous_text: bool = True, fp16: bool = True,
- compute_type: str = "float16",
- temperature_increment_on_fallback: float = 0.2, compression_ratio_threshold: float = 2.4,
- logprob_threshold: float = -1.0, no_speech_threshold: float = 0.6,
- # Word timestamp settings
- word_timestamps: bool = False, prepend_punctuations: str = "\"\'“¿([{-",
- append_punctuations: str = "\"\'.。,,!!??::”)]}、",
- highlight_words: bool = False):
-
- self.models = models
-
- # WebUI settings
- self.input_audio_max_duration = input_audio_max_duration
- self.share = share
- self.server_name = server_name
- self.server_port = server_port
- self.queue_concurrency_count = queue_concurrency_count
- self.delete_uploaded_files = delete_uploaded_files
-
- self.whisper_implementation = whisper_implementation
- self.default_model_name = default_model_name
- self.default_vad = default_vad
- self.vad_parallel_devices = vad_parallel_devices
- self.vad_cpu_cores = vad_cpu_cores
- self.vad_process_timeout = vad_process_timeout
- self.auto_parallel = auto_parallel
- self.output_dir = output_dir
-
- self.model_dir = model_dir
- self.device = device
- self.verbose = verbose
- self.task = task
- self.language = language
- self.vad_initial_prompt_mode = vad_initial_prompt_mode
- self.vad_merge_window = vad_merge_window
- self.vad_max_merge_size = vad_max_merge_size
- self.vad_padding = vad_padding
- self.vad_prompt_window = vad_prompt_window
- self.temperature = temperature
- self.best_of = best_of
- self.beam_size = beam_size
- self.patience = patience
- self.length_penalty = length_penalty
- self.suppress_tokens = suppress_tokens
- self.initial_prompt = initial_prompt
- self.condition_on_previous_text = condition_on_previous_text
- self.fp16 = fp16
- self.compute_type = compute_type
- self.temperature_increment_on_fallback = temperature_increment_on_fallback
- self.compression_ratio_threshold = compression_ratio_threshold
- self.logprob_threshold = logprob_threshold
- self.no_speech_threshold = no_speech_threshold
-
- # Word timestamp settings
- self.word_timestamps = word_timestamps
- self.prepend_punctuations = prepend_punctuations
- self.append_punctuations = append_punctuations
- self.highlight_words = highlight_words
-
- def get_model_names(self):
- return [ x.name for x in self.models ]
-
- def update(self, **new_values):
- result = ApplicationConfig(**self.__dict__)
-
- for key, value in new_values.items():
- setattr(result, key, value)
- return result
-
- @staticmethod
- def create_default(**kwargs):
- app_config = ApplicationConfig.parse_file(os.environ.get("WHISPER_WEBUI_CONFIG", "config.json5"))
-
- # Update with kwargs
- if len(kwargs) > 0:
- app_config = app_config.update(**kwargs)
- return app_config
-
- @staticmethod
- def parse_file(config_path: str):
- import json5
-
- with open(config_path, "r", encoding="utf-8") as f:
- # Load using json5
- data = json5.load(f)
- data_models = data.pop("models", [])
-
- models = [ ModelConfig(**x) for x in data_models ]
-
- return ApplicationConfig(models, **data)
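-
-# --- Illustrative usage sketch (ours, not part of the original file) ---
-# Assumes a config.json5 next to the application, as expected by create_default();
-# the overridden field names come from the ApplicationConfig signature above.
-if __name__ == "__main__":
-    example_config = ApplicationConfig.create_default(default_model_name="small", language="en")
-    print(example_config.get_model_names())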
diff --git a/spaces/HighCWu/starganv2vc-paddle/starganv2vc_paddle/losses.py b/spaces/HighCWu/starganv2vc-paddle/starganv2vc_paddle/losses.py
deleted file mode 100644
index 59907b675ef298969933def7bca3a128bbe3137d..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/starganv2vc-paddle/starganv2vc_paddle/losses.py
+++ /dev/null
@@ -1,215 +0,0 @@
-#coding:utf-8
-
-import os
-import paddle
-
-from paddle import nn
-from munch import Munch
-from starganv2vc_paddle.transforms import build_transforms
-
-import paddle.nn.functional as F
-import numpy as np
-
-def compute_d_loss(nets, args, x_real, y_org, y_trg, z_trg=None, x_ref=None, use_r1_reg=True, use_adv_cls=False, use_con_reg=False):
- args = Munch(args)
-
- assert (z_trg is None) != (x_ref is None)
- # with real audios
- x_real.stop_gradient = False
- out = nets.discriminator(x_real, y_org)
- loss_real = adv_loss(out, 1)
-
- # R1 regularization (https://arxiv.org/abs/1801.04406v4)
- if use_r1_reg:
- loss_reg = r1_reg(out, x_real)
- else:
- loss_reg = paddle.to_tensor([0.], dtype=paddle.float32)
-
- # consistency regularization (bCR-GAN: https://arxiv.org/abs/2002.04724)
- loss_con_reg = paddle.to_tensor([0.], dtype=paddle.float32)
- if use_con_reg:
- t = build_transforms()
- out_aug = nets.discriminator(t(x_real).detach(), y_org)
- loss_con_reg += F.smooth_l1_loss(out, out_aug)
-
- # with fake audios
- with paddle.no_grad():
- if z_trg is not None:
- s_trg = nets.mapping_network(z_trg, y_trg)
- else: # x_ref is not None
- s_trg = nets.style_encoder(x_ref, y_trg)
-
- F0 = nets.f0_model.get_feature_GAN(x_real)
- x_fake = nets.generator(x_real, s_trg, masks=None, F0=F0)
- out = nets.discriminator(x_fake, y_trg)
- loss_fake = adv_loss(out, 0)
- if use_con_reg:
- out_aug = nets.discriminator(t(x_fake).detach(), y_trg)
- loss_con_reg += F.smooth_l1_loss(out, out_aug)
-
- # adversarial classifier loss
- if use_adv_cls:
- out_de = nets.discriminator.classifier(x_fake)
- loss_real_adv_cls = F.cross_entropy(out_de[y_org != y_trg], y_org[y_org != y_trg])
-
- if use_con_reg:
- out_de_aug = nets.discriminator.classifier(t(x_fake).detach())
- loss_con_reg += F.smooth_l1_loss(out_de, out_de_aug)
- else:
- loss_real_adv_cls = paddle.zeros([1]).mean()
-
- loss = loss_real + loss_fake + args.lambda_reg * loss_reg + \
- args.lambda_adv_cls * loss_real_adv_cls + \
- args.lambda_con_reg * loss_con_reg
-
- return loss, Munch(real=loss_real.item(),
- fake=loss_fake.item(),
- reg=loss_reg.item(),
- real_adv_cls=loss_real_adv_cls.item(),
- con_reg=loss_con_reg.item())
-
-def compute_g_loss(nets, args, x_real, y_org, y_trg, z_trgs=None, x_refs=None, use_adv_cls=False):
- args = Munch(args)
-
- assert (z_trgs is None) != (x_refs is None)
- if z_trgs is not None:
- z_trg, z_trg2 = z_trgs
- if x_refs is not None:
- x_ref, x_ref2 = x_refs
-
- # compute style vectors
- if z_trgs is not None:
- s_trg = nets.mapping_network(z_trg, y_trg)
- else:
- s_trg = nets.style_encoder(x_ref, y_trg)
-
- # compute ASR/F0 features (real)
- with paddle.no_grad():
- F0_real, GAN_F0_real, cyc_F0_real = nets.f0_model(x_real)
- ASR_real = nets.asr_model.get_feature(x_real)
-
- # adversarial loss
- x_fake = nets.generator(x_real, s_trg, masks=None, F0=GAN_F0_real)
- out = nets.discriminator(x_fake, y_trg)
- loss_adv = adv_loss(out, 1)
-
- # compute ASR/F0 features (fake)
- F0_fake, GAN_F0_fake, _ = nets.f0_model(x_fake)
- ASR_fake = nets.asr_model.get_feature(x_fake)
-
- # norm consistency loss
- x_fake_norm = log_norm(x_fake)
- x_real_norm = log_norm(x_real)
- loss_norm = ((paddle.nn.ReLU()(paddle.abs(x_fake_norm - x_real_norm) - args.norm_bias))**2).mean()
-
- # F0 loss
- loss_f0 = f0_loss(F0_fake, F0_real)
-
- # style F0 loss (style initialization)
- if x_refs is not None and args.lambda_f0_sty > 0 and not use_adv_cls:
- F0_sty, _, _ = nets.f0_model(x_ref)
- loss_f0_sty = F.l1_loss(compute_mean_f0(F0_fake), compute_mean_f0(F0_sty))
- else:
- loss_f0_sty = paddle.zeros([1]).mean()
-
- # ASR loss
- loss_asr = F.smooth_l1_loss(ASR_fake, ASR_real)
-
- # style reconstruction loss
- s_pred = nets.style_encoder(x_fake, y_trg)
- loss_sty = paddle.mean(paddle.abs(s_pred - s_trg))
-
- # diversity sensitive loss
- if z_trgs is not None:
- s_trg2 = nets.mapping_network(z_trg2, y_trg)
- else:
- s_trg2 = nets.style_encoder(x_ref2, y_trg)
- x_fake2 = nets.generator(x_real, s_trg2, masks=None, F0=GAN_F0_real)
- x_fake2 = x_fake2.detach()
- _, GAN_F0_fake2, _ = nets.f0_model(x_fake2)
- loss_ds = paddle.mean(paddle.abs(x_fake - x_fake2))
- loss_ds += F.smooth_l1_loss(GAN_F0_fake, GAN_F0_fake2.detach())
-
- # cycle-consistency loss
- s_org = nets.style_encoder(x_real, y_org)
- x_rec = nets.generator(x_fake, s_org, masks=None, F0=GAN_F0_fake)
- loss_cyc = paddle.mean(paddle.abs(x_rec - x_real))
- # F0 loss in cycle-consistency loss
- if args.lambda_f0 > 0:
- _, _, cyc_F0_rec = nets.f0_model(x_rec)
- loss_cyc += F.smooth_l1_loss(cyc_F0_rec, cyc_F0_real)
- if args.lambda_asr > 0:
- ASR_recon = nets.asr_model.get_feature(x_rec)
- loss_cyc += F.smooth_l1_loss(ASR_recon, ASR_real)
-
- # adversarial classifier loss
- if use_adv_cls:
- out_de = nets.discriminator.classifier(x_fake)
- loss_adv_cls = F.cross_entropy(out_de[y_org != y_trg], y_trg[y_org != y_trg])
- else:
- loss_adv_cls = paddle.zeros([1]).mean()
-
- loss = args.lambda_adv * loss_adv + args.lambda_sty * loss_sty \
- - args.lambda_ds * loss_ds + args.lambda_cyc * loss_cyc\
- + args.lambda_norm * loss_norm \
- + args.lambda_asr * loss_asr \
- + args.lambda_f0 * loss_f0 \
- + args.lambda_f0_sty * loss_f0_sty \
- + args.lambda_adv_cls * loss_adv_cls
-
- return loss, Munch(adv=loss_adv.item(),
- sty=loss_sty.item(),
- ds=loss_ds.item(),
- cyc=loss_cyc.item(),
- norm=loss_norm.item(),
- asr=loss_asr.item(),
- f0=loss_f0.item(),
- adv_cls=loss_adv_cls.item())
-
-# for norm consistency loss
-def log_norm(x, mean=-4, std=4, axis=2):
- """
- normalized log mel -> mel -> norm -> log(norm)
- """
- x = paddle.log(paddle.exp(x * std + mean).norm(axis=axis))
- return x
-
-# for adversarial loss
-def adv_loss(logits, target):
- assert target in [1, 0]
- if len(logits.shape) > 1:
- logits = logits.reshape([-1])
- targets = paddle.full_like(logits, fill_value=target)
- logits = logits.clip(min=-10, max=10) # prevent nan
- loss = F.binary_cross_entropy_with_logits(logits, targets)
- return loss
-
-# for R1 regularization loss
-def r1_reg(d_out, x_in):
- # zero-centered gradient penalty for real images
- batch_size = x_in.shape[0]
- grad_dout = paddle.grad(
- outputs=d_out.sum(), inputs=x_in,
- create_graph=True, retain_graph=True, only_inputs=True
- )[0]
- grad_dout2 = grad_dout.pow(2)
- assert(grad_dout2.shape == x_in.shape)
- reg = 0.5 * grad_dout2.reshape((batch_size, -1)).sum(1).mean(0)
- return reg
-
-# for F0 consistency loss
-def compute_mean_f0(f0):
- f0_mean = f0.mean(-1)
- f0_mean = f0_mean.expand((f0.shape[-1], f0_mean.shape[0])).transpose((1, 0)) # (B, M)
- return f0_mean
-
-def f0_loss(x_f0, y_f0):
- """
- x.shape = (B, 1, M, L): predict
- y.shape = (B, 1, M, L): target
- """
- # compute the mean
- x_mean = compute_mean_f0(x_f0)
- y_mean = compute_mean_f0(y_f0)
- loss = F.l1_loss(x_f0 / x_mean, y_f0 / y_mean)
- return loss
\ No newline at end of file
diff --git a/spaces/Hoodady/3DFuse/voxnerf/data.py b/spaces/Hoodady/3DFuse/voxnerf/data.py
deleted file mode 100644
index 3367842834c10872b3a0255d9b0f7b70358830f7..0000000000000000000000000000000000000000
--- a/spaces/Hoodady/3DFuse/voxnerf/data.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from pathlib import Path
-import json
-import numpy as np
-import imageio
-from .utils import blend_rgba
-
-
-def load_blender(split, scene="lego", half_res=False):
- assert split in ("train", "val", "test")
-
- env_fname = Path(__file__).resolve().parents[1] / "env.json"
- with env_fname.open("r") as f:
- root = json.load(f)['data_root']
- root = Path(root) / scene
-
- with open(root / f'transforms_{split}.json', "r") as f:
- meta = json.load(f)
-
- imgs, poses = [], []
-
- for frame in meta['frames']:
- file_name = root / f"{frame['file_path']}.png"
- im = imageio.imread(file_name)
- c2w = frame['transform_matrix']
-
- imgs.append(im)
- poses.append(c2w)
-
- imgs = (np.array(imgs) / 255.).astype(np.float32) # (RGBA) imgs
- imgs = blend_rgba(imgs)
- poses = np.array(poses).astype(float)
- # print(imgs.shape)
- H, W = imgs[0].shape[:2]
- W = 64
- H = 64
- camera_angle_x = float(meta['camera_angle_x'])
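- # pinhole intrinsics: focal length in pixels, f = (W / 2) / tan(fov_x / 2)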
- f = 1 / np.tan(camera_angle_x / 2) * (W / 2)
-
- if half_res:
- raise NotImplementedError()
-
- K = np.array([
- [f, 0, -(W/2 - 0.5)],
- [0, -f, -(H/2 - 0.5)],
- [0, 0, -1]
- ]) # note OpenGL -ve z convention;
-
- return imgs, K, poses
diff --git a/spaces/HuggingFaceM4/OBELICS-Interactive-Map/style.css b/spaces/HuggingFaceM4/OBELICS-Interactive-Map/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceM4/OBELICS-Interactive-Map/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/HugoDzz/spaceship_drift/postcss.config.js b/spaces/HugoDzz/spaceship_drift/postcss.config.js
deleted file mode 100644
index 2e7af2b7f1a6f391da1631d93968a9d487ba977d..0000000000000000000000000000000000000000
--- a/spaces/HugoDzz/spaceship_drift/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-export default {
- plugins: {
- tailwindcss: {},
- autoprefixer: {},
- },
-}
diff --git a/spaces/HypermindLabs/Snore-Detector/README.md b/spaces/HypermindLabs/Snore-Detector/README.md
deleted file mode 100644
index b9db1f0d1e9b0c3fb725b9ff39eca2c71c6970ba..0000000000000000000000000000000000000000
--- a/spaces/HypermindLabs/Snore-Detector/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Snore Detector
-emoji: 🏢
-colorFrom: yellow
-colorTo: green
-sdk: streamlit
-sdk_version: 1.27.2
-app_file: app.py
-pinned: false
-license: cc-by-nc-nd-4.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ICML2022/PointCloudC/DGCNN.py b/spaces/ICML2022/PointCloudC/DGCNN.py
deleted file mode 100644
index 46a0d46ff0fa2be2b57d165dfe5049b4db36d8c7..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/PointCloudC/DGCNN.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Author: Yue Wang
-@Contact: yuewangx@mit.edu
-@File: model.py
-@Time: 2018/10/13 6:35 PM
-"""
-
-import os
-import sys
-import copy
-import math
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-def knn(x, k):
- inner = -2 * torch.matmul(x.transpose(2, 1), x)
- xx = torch.sum(x ** 2, dim=1, keepdim=True)
- pairwise_distance = -xx - inner - xx.transpose(2, 1)
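- # pairwise_distance holds -||x_i - x_j||^2, so topk below selects the k nearest neighbours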
-
- idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
- return idx
-
-
-def get_graph_feature(x, k=20, idx=None):
- batch_size = x.size(0)
- num_points = x.size(2)
- x = x.view(batch_size, -1, num_points)
- if idx is None:
- idx = knn(x, k=k) # (batch_size, num_points, k)
- device = torch.device('cpu')
-
- idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
-
- idx = idx + idx_base
-
- idx = idx.view(-1)
-
- _, num_dims, _ = x.size()
-
- x = x.transpose(2,
- 1).contiguous() # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) # batch_size * num_points * k + range(0, batch_size*num_points)
- feature = x.view(batch_size * num_points, -1)[idx, :]
- feature = feature.view(batch_size, num_points, k, num_dims)
- x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
-
- feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous()
-
- return feature
-
-class DGCNN(nn.Module):
- def __init__(self, output_channels=40):
- super(DGCNN, self).__init__()
- self.k = 20
- emb_dims = 1024
- dropout = 0.5
-
- self.bn1 = nn.BatchNorm2d(64)
- self.bn2 = nn.BatchNorm2d(64)
- self.bn3 = nn.BatchNorm2d(128)
- self.bn4 = nn.BatchNorm2d(256)
- self.bn5 = nn.BatchNorm1d(emb_dims)
-
- self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
- self.bn1,
- nn.LeakyReLU(negative_slope=0.2))
- self.conv2 = nn.Sequential(nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False),
- self.bn2,
- nn.LeakyReLU(negative_slope=0.2))
- self.conv3 = nn.Sequential(nn.Conv2d(64 * 2, 128, kernel_size=1, bias=False),
- self.bn3,
- nn.LeakyReLU(negative_slope=0.2))
- self.conv4 = nn.Sequential(nn.Conv2d(128 * 2, 256, kernel_size=1, bias=False),
- self.bn4,
- nn.LeakyReLU(negative_slope=0.2))
- self.conv5 = nn.Sequential(nn.Conv1d(512, emb_dims, kernel_size=1, bias=False),
- self.bn5,
- nn.LeakyReLU(negative_slope=0.2))
- self.linear1 = nn.Linear(emb_dims * 2, 512, bias=False)
- self.bn6 = nn.BatchNorm1d(512)
- self.dp1 = nn.Dropout(p=dropout)
- self.linear2 = nn.Linear(512, 256)
- self.bn7 = nn.BatchNorm1d(256)
- self.dp2 = nn.Dropout(p=dropout)
- self.linear3 = nn.Linear(256, output_channels)
-
- def forward(self, x):
- batch_size = x.size(0)
- x = get_graph_feature(x, k=self.k)
- x = self.conv1(x)
- x1 = x.max(dim=-1, keepdim=False)[0]
-
- x = get_graph_feature(x1, k=self.k)
- x = self.conv2(x)
- x2 = x.max(dim=-1, keepdim=False)[0]
-
- x = get_graph_feature(x2, k=self.k)
- x = self.conv3(x)
- x3 = x.max(dim=-1, keepdim=False)[0]
-
- x = get_graph_feature(x3, k=self.k)
- x = self.conv4(x)
- x4 = x.max(dim=-1, keepdim=False)[0]
-
- x = torch.cat((x1, x2, x3, x4), dim=1)
-
- x = self.conv5(x)
- x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
- x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
- x = torch.cat((x1, x2), 1)
-
- x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
- x = self.dp1(x)
- x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
- x = self.dp2(x)
- x = self.linear3(x)
- return x
\ No newline at end of file
diff --git a/spaces/ICML2022/PointCloudC/util/util.py b/spaces/ICML2022/PointCloudC/util/util.py
deleted file mode 100644
index 00afdd8435f42ad52480945285459c5cd3576d73..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/PointCloudC/util/util.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-
-def cal_loss(pred, gold, smoothing=True):
- ''' Calculate cross entropy loss, apply label smoothing if needed. '''
-
- gold = gold.contiguous().view(-1) # gold is the ground-truth label from the dataloader
-
- if smoothing:
- eps = 0.2
- n_class = pred.size(1) # number of output feature dimensions, i.e. the output channels / classes
-
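- # label smoothing: 1 - eps on the true class, eps / (n_class - 1) on every other class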
- one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
- one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
- log_prb = F.log_softmax(pred, dim=1)
-
- loss = -(one_hot * log_prb).sum(dim=1).mean()
- else:
- loss = F.cross_entropy(pred, gold, reduction='mean')
-
- return loss
-
-
-# create a file and write the text into it:
-class IOStream():
- def __init__(self, path):
- self.f = open(path, 'a')
-
- def cprint(self, text):
- print(text)
- self.f.write(text+'\n')
- self.f.flush()
-
- def close(self):
- self.f.close()
-
-
-def to_categorical(y, num_classes):
- """ 1-hot encodes a tensor """
- new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
- if (y.is_cuda):
- return new_y.cuda(non_blocking=True)
- return new_y
-
-
-def compute_overall_iou(pred, target, num_classes):
- shape_ious = []
- pred = pred.max(dim=2)[1] # (batch_size, num_points) the pred_class_idx of each point in each sample
- pred_np = pred.cpu().data.numpy()
-
- target_np = target.cpu().data.numpy()
- for shape_idx in range(pred.size(0)): # sample_idx
- part_ious = []
- for part in range(num_classes): # class_idx: iterate over every part class of every category (all 50 classes)
- # each target point has a class label regardless of which category the point belongs to
- # intersection: counts points where both prediction and target equal this class:
- I = np.sum(np.logical_and(pred_np[shape_idx] == part, target_np[shape_idx] == part))
- # union: counts points where either prediction or target equals this class:
- U = np.sum(np.logical_or(pred_np[shape_idx] == part, target_np[shape_idx] == part))
-
- F = np.sum(target_np[shape_idx] == part)
-
- if F != 0:
- iou = I / float(U) # iou across all points for this class
- part_ious.append(iou) # append the iou of this class
- shape_ious.append(np.mean(part_ious)) # append the average iou over the classes present in this sample (sample level)
- return shape_ious # [batch_size]
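
For context, a minimal usage sketch of the two helpers above (shapes are inferred from the code itself; the import path is an assumption based on this file's location):

```python
import torch

# Assumed import path for the util/util.py file shown above.
from util.util import cal_loss, compute_overall_iou

batch_size, num_points, num_classes = 4, 1024, 50

# cal_loss expects raw logits of shape (B, C) and integer labels of shape (B,).
logits = torch.randn(batch_size, num_classes)
labels = torch.randint(0, num_classes, (batch_size,))
loss = cal_loss(logits, labels, smoothing=True)  # label-smoothed cross entropy

# compute_overall_iou expects per-point logits (B, N, C) and labels (B, N);
# it returns one IoU per sample, averaged over the classes present in that sample.
point_logits = torch.randn(batch_size, num_points, num_classes)
point_labels = torch.randint(0, num_classes, (batch_size, num_points))
ious = compute_overall_iou(point_logits, point_labels, num_classes)  # list of length B
```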
diff --git a/spaces/ICML2022/resefa/third_party/stylegan3_official_ops/README.md b/spaces/ICML2022/resefa/third_party/stylegan3_official_ops/README.md
deleted file mode 100644
index 417b50ba3dde490f4a4c5c6dfc6afbc28ba640d0..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/resefa/third_party/stylegan3_official_ops/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Operators for StyleGAN2
-
-All files in this directory are borrowed from the [stylegan3](https://github.com/NVlabs/stylegan3) repository. These files implement customized operators that are faster than the native PyTorch operators, especially for second-derivative computation, including
-
-- `bias_act.bias_act()`: Fuses bias addition and activation into a single operator.
-- `upfirdn2d.setup_filter()`: Sets up the kernel used for filtering.
-- `upfirdn2d.filter2d()`: Filters a 2D feature map with a given kernel.
-- `upfirdn2d.upsample2d()`: Upsamples a 2D feature map.
-- `upfirdn2d.downsample2d()`: Downsamples a 2D feature map.
-- `upfirdn2d.upfirdn2d()`: Upsamples, filters, and then downsamples a 2D feature map.
-- `filtered_lrelu.filtered_lrelu()`: Leaky ReLU layer, wrapped with upsampling and downsampling for anti-aliasing.
-- `conv2d_gradfix.conv2d()`: Convolutional layer, supporting arbitrarily high-order gradients and fixing the gradient when computing a penalty.
-- `conv2d_gradfix.conv_transpose2d()`: Transposed convolutional layer, supporting arbitrarily high-order gradients and fixing the gradient when computing a penalty.
-- `conv2d_resample.conv2d_resample()`: Wraps `upfirdn2d()` and `conv2d()` (or `conv_transpose2d()`). This is not used in our network implementation (*i.e.*, `models/stylegan2_generator.py` and `models/stylegan2_discriminator.py`).
-
-We make the following slight modifications beyond disabling some lint warnings:
-
-- Line 24 of file `misc.py`: Use `EasyDict` from module `easydict` to replace that from `dnnlib` from [stylegan3](https://github.com/NVlabs/stylegan3).
-- Line 36 of file `custom_ops.py`: Disable log message when setting up customized operators.
-- Line 54/109 of file `custom_ops.py`: Add necessary CUDA compiler path. (***NOTE**: If your CUDA binary is not located at `/usr/local/cuda/bin`, please specify the correct path in function `_find_compiler_bindir_posix()`.*)
-- Line 21 of file `bias_act.py`: Use `EasyDict` from module `easydict` to replace that from `dnnlib` from [stylegan3](https://github.com/NVlabs/stylegan3).
-- Line 162-165 of file `filtered_lrelu.py`: Change some implementations in `_filtered_lrelu_ref()` to `ref`.
-- Line 31 of file `grid_sample_gradfix.py`: Enable customized grid sampling operator by default.
-- Line 35 of file `grid_sample_gradfix.py`: Use `impl` to disable customized grid sample operator.
-- Line 34 of file `conv2d_gradfix.py`: Enable customized convolution operators by default.
-- Line 48/53 of file `conv2d_gradfix.py`: Use `impl` to disable customized convolution operators.
-- Line 36/53 of file `conv2d_resample.py`: Use `impl` to disable customized convolution operators.
-- Line 23 of file `fma.py`: Use `impl` to disable customized add-multiply operator.
-
-Please use `ref` or `cuda` to choose which implementation to use. `ref` refers to native PyTorch operators while `cuda` refers to the customized operators from the official repository. `cuda` is used by default.
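
For reference, a rough sketch of how the `ref`/`cuda` switch might be used (a hedged example, not taken from the repository; the keyword arguments follow the official stylegan3 signatures, and the import path assumes the directory layout described above):

```python
import torch

# Assumed import path based on the directory layout described above.
from third_party.stylegan3_official_ops import bias_act, upfirdn2d

x = torch.randn(1, 64, 32, 32)   # NCHW feature map
b = torch.zeros(64)              # per-channel bias

# Fused bias + leaky ReLU. impl='ref' uses native PyTorch ops,
# impl='cuda' uses the customized compiled operators (the default).
y = bias_act.bias_act(x, b, act='lrelu', impl='ref')

# Set up a low-pass filter kernel and upsample the feature map by 2x.
f = upfirdn2d.setup_filter([1, 3, 3, 1])
y_up = upfirdn2d.upsample2d(y, f, up=2, impl='ref')
```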
diff --git a/spaces/JMalott/ai_architecture/min_dalle/__init__.py b/spaces/JMalott/ai_architecture/min_dalle/__init__.py
deleted file mode 100644
index 83962cb7d534feec2d114cbe5351548f745092f2..0000000000000000000000000000000000000000
--- a/spaces/JMalott/ai_architecture/min_dalle/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .min_dalle import MinDalle
\ No newline at end of file
diff --git a/spaces/Jaehan/Code-Generator-1/app.py b/spaces/Jaehan/Code-Generator-1/app.py
deleted file mode 100644
index 02c094ceb6d8355c98aa01ea0cfaf3aacea920a4..0000000000000000000000000000000000000000
--- a/spaces/Jaehan/Code-Generator-1/app.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from transformers import AutoTokenizer, AutoModelForCausalLM
-import gradio as gr
-
-model_name = "Salesforce/codegen-350M-mono"
-codegen_token = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
-
-def codegen(intent):
- """Give input as text which reflects intent of the program.
- """
- #text = "Write a function which takes 2 numbers as input and returns the larger of the two."
-
- input_ids = codegen_token(intent, return_tensors="pt").input_ids
- outcode_ids = model.generate(input_ids, max_length=256)
- response = codegen_token.decode(outcode_ids[0], skip_special_tokens=True)
- return response
-
-# UX
-in_text = gr.Textbox(lines=1, label="Place your intent here.")
-out = gr.Textbox(lines=1, label="Generated python code", placeholder="")
-gr.Interface(codegen, inputs=in_text, outputs=out).launch()
\ No newline at end of file
diff --git a/spaces/Jayavathsan/ChatGPT_CloneWithSummary/app.py b/spaces/Jayavathsan/ChatGPT_CloneWithSummary/app.py
deleted file mode 100644
index f954a3c3746bcdd8bf4184125c601312016a7b9a..0000000000000000000000000000000000000000
--- a/spaces/Jayavathsan/ChatGPT_CloneWithSummary/app.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import streamlit as st
-from streamlit_chat import message
-from langchain import OpenAI
-from langchain.chains import ConversationChain
-from langchain.chains.conversation.memory import (ConversationBufferMemory,
- ConversationSummaryMemory,
- ConversationBufferWindowMemory
-
- )
-
-if 'conversation' not in st.session_state:
- st.session_state['conversation'] =None
-if 'messages' not in st.session_state:
- st.session_state['messages'] =[]
-if 'API_Key' not in st.session_state:
- st.session_state['API_Key'] =''
-
-# Setting page title and header
-st.set_page_config(page_title="Chat GPT Clone", page_icon=":robot_face:")
-st.markdown("
How can I assist you?
", unsafe_allow_html=True)
-
-
-st.sidebar.title("😎")
-st.session_state['API_Key']= st.sidebar.text_input("What's your API key?",type="password")
-summarise_button = st.sidebar.button("Summarise the conversation", key="summarise")
-if summarise_button:
- summarise_placeholder = st.sidebar.write("Nice chatting with you my friend :\n\n"+st.session_state['conversation'].memory.buffer)
- #summarise_placeholder.write("Nice chatting with you my friend :\n\n"+st.session_state['conversation'].memory.buffer)
-
-#import os
-#os.environ["OPENAI_API_KEY"] = "yourOpenAI API KEY goes here" #but not required for this project
-
-def getresponse(userInput, api_key):
-
- if st.session_state['conversation'] is None:
-
- llm = OpenAI(
- temperature=0,
- openai_api_key=api_key,
- model_name='text-davinci-003' #we can also use 'gpt-3.5-turbo'
- )
-
- st.session_state['conversation'] = ConversationChain(
- llm=llm,
- verbose=True,
- memory=ConversationSummaryMemory(llm=llm)
- )
-
- response=st.session_state['conversation'].predict(input=userInput)
- print(st.session_state['conversation'].memory.buffer)
-
-
- return response
-
-
-
-response_container = st.container()
-# Here we will have a container for user input text box
-container = st.container()
-
-
-with container:
- with st.form(key='my_form', clear_on_submit=True):
- user_input = st.text_area("Your question goes here:", key='input', height=100)
- submit_button = st.form_submit_button(label='Send')
-
- if submit_button:
- st.session_state['messages'].append(user_input)
- model_response=getresponse(user_input,st.session_state['API_Key'])
- st.session_state['messages'].append(model_response)
-
-
- with response_container:
- for i in range(len(st.session_state['messages'])):
- if (i % 2) == 0:
- message(st.session_state['messages'][i], is_user=True, key=str(i) + '_user')
- else:
- message(st.session_state['messages'][i], key=str(i) + '_AI')
-
-
-
-
diff --git a/spaces/JessPink/Text_rewriting-Chatbot/README.md b/spaces/JessPink/Text_rewriting-Chatbot/README.md
deleted file mode 100644
index 5a775374d3051d28a9658ba20454ec744ff84c40..0000000000000000000000000000000000000000
--- a/spaces/JessPink/Text_rewriting-Chatbot/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Text Rewriting-Chatbot
-emoji: 🐢
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 4.1.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/app.py b/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/app.py
deleted file mode 100644
index 86eb3fdef1dc820e43f040435512978db1d1bc2a..0000000000000000000000000000000000000000
--- a/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/app.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import streamlit as st
-import random
-import re
-from transformers import pipeline, set_seed
-
-gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
-
-with open("examples.txt", "r") as f:
- line = f.readlines()
-
-def generate(starting_text):
- seed = random.randint(100, 1000000)
- set_seed(seed)
-
- if starting_text == "":
- starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
- starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text)
-
- response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=4)
- response_list = []
- for x in response:
- resp = x['generated_text'].strip()
- if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
- response_list.append(resp+'\n')
-
- response_end = "\n".join(response_list)
- response_end = re.sub('[^ ]+\.[^ ]+','', response_end)
- response_end = response_end.replace("<", "").replace(">", "")
-
- if response_end != "":
- return response_end
-
-st.title("Stable Diffusion Prompt Generator")
-
-st.markdown("This is a web app for [this](https://huggingface.co/Gustavosta/MagicPrompt-Stable-Diffusion) model trained by Gustavosta for Stable Diffusion to create a Prompt from a few words. You can submit your own text or select from provided examples.")
-
-starting_text = st.text_input(label="Initial Text", placeholder="Text here", value="")
-
-if st.button("Generate"):
- result = generate(starting_text)
- st.write("
{}
".format(" ".join(result.splitlines())), unsafe_allow_html=True)
-
-examples = []
-for x in range(5):
- examples.append(line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize())
-
-st.write("")
-st.write("
Examples:
",unsafe_allow_html=True)
-for example in examples:
- st.write("
• {}
".format(example), unsafe_allow_html=True)
diff --git a/spaces/KarmKarma/rvc-models-genshinimpact/vc_infer_pipeline.py b/spaces/KarmKarma/rvc-models-genshinimpact/vc_infer_pipeline.py
deleted file mode 100644
index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000
--- a/spaces/KarmKarma/rvc-models-genshinimpact/vc_infer_pipeline.py
+++ /dev/null
@@ -1,306 +0,0 @@
-import numpy as np, parselmouth, torch, pdb
-from time import time as ttime
-import torch.nn.functional as F
-from config import x_pad, x_query, x_center, x_max
-import scipy.signal as signal
-import pyworld, os, traceback, faiss
-from scipy import signal
-
-bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
-
-
-class VC(object):
- def __init__(self, tgt_sr, device, is_half):
- self.sr = 16000 # hubert input sampling rate
- self.window = 160 # samples per frame
- self.t_pad = self.sr * x_pad # padding time before/after each segment
- self.t_pad_tgt = tgt_sr * x_pad
- self.t_pad2 = self.t_pad * 2
- self.t_query = self.sr * x_query # search range around each candidate cut point
- self.t_center = self.sr * x_center # interval between candidate cut points
- self.t_max = self.sr * x_max # max duration that can be processed without splitting
- self.device = device
- self.is_half = is_half
-
- def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
- time_step = self.window / self.sr * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- if f0_method == "pm":
- f0 = (
- parselmouth.Sound(x, self.sr)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=f0_min,
- pitch_ceiling=f0_max,
- )
- .selected_array["frequency"]
- )
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
- )
- elif f0_method == "harvest":
- f0, t = pyworld.harvest(
- x.astype(np.double),
- fs=self.sr,
- f0_ceil=f0_max,
- f0_floor=f0_min,
- frame_period=10,
- )
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
- f0 = signal.medfilt(f0, 3)
- f0 *= pow(2, f0_up_key / 12)
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- tf0 = self.sr // self.window # f0 frames per second
- if inp_f0 is not None:
- delta_t = np.round(
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
- ).astype("int16")
- replace_f0 = np.interp(
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
- )
- shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
- f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- f0bak = f0.copy()
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- f0_coarse = np.rint(f0_mel).astype(int)
- return f0_coarse, f0bak # 1-0
-
- def vc(
- self,
- model,
- net_g,
- sid,
- audio0,
- pitch,
- pitchf,
- times,
- index,
- big_npy,
- index_rate,
- ): # ,file_index,file_big_npy
- feats = torch.from_numpy(audio0)
- if self.is_half:
- feats = feats.half()
- else:
- feats = feats.float()
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
- inputs = {
- "source": feats.to(self.device),
- "padding_mask": padding_mask,
- "output_layer": 9, # layer 9
- }
- t0 = ttime()
- with torch.no_grad():
- logits = model.extract_features(**inputs)
- feats = model.final_proj(logits[0])
-
- if (
- isinstance(index, type(None)) == False
- and isinstance(big_npy, type(None)) == False
- and index_rate != 0
- ):
- npy = feats[0].cpu().numpy()
- if self.is_half:
- npy = npy.astype("float32")
- _, I = index.search(npy, 1)
- npy = big_npy[I.squeeze()]
- if self.is_half:
- npy = npy.astype("float16")
- feats = (
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
- + (1 - index_rate) * feats
- )
-
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
- t1 = ttime()
- p_len = audio0.shape[0] // self.window
- if feats.shape[1] < p_len:
- p_len = feats.shape[1]
- if pitch != None and pitchf != None:
- pitch = pitch[:, :p_len]
- pitchf = pitchf[:, :p_len]
- p_len = torch.tensor([p_len], device=self.device).long()
- with torch.no_grad():
- if pitch != None and pitchf != None:
- audio1 = (
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- else:
- audio1 = (
- (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- del feats, p_len, padding_mask
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- t2 = ttime()
- times[0] += t1 - t0
- times[2] += t2 - t1
- return audio1
-
- def pipeline(
- self,
- model,
- net_g,
- sid,
- audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- file_big_npy,
- index_rate,
- if_f0,
- f0_file=None,
- ):
- if (
- file_big_npy != ""
- and file_index != ""
- and os.path.exists(file_big_npy) == True
- and os.path.exists(file_index) == True
- and index_rate != 0
- ):
- try:
- index = faiss.read_index(file_index)
- big_npy = np.load(file_big_npy)
- except:
- traceback.print_exc()
- index = big_npy = None
- else:
- index = big_npy = None
- print("Feature retrieval library doesn't exist or ratio is 0")
- audio = signal.filtfilt(bh, ah, audio)
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
- opt_ts = []
- if audio_pad.shape[0] > self.t_max:
- audio_sum = np.zeros_like(audio)
- for i in range(self.window):
- audio_sum += audio_pad[i : i - self.window]
- for t in range(self.t_center, audio.shape[0], self.t_center):
- opt_ts.append(
- t
- - self.t_query
- + np.where(
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
- )[0][0]
- )
- s = 0
- audio_opt = []
- t = None
- t1 = ttime()
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
- p_len = audio_pad.shape[0] // self.window
- inp_f0 = None
- if hasattr(f0_file, "name") == True:
- try:
- with open(f0_file.name, "r") as f:
- lines = f.read().strip("\n").split("\n")
- inp_f0 = []
- for line in lines:
- inp_f0.append([float(i) for i in line.split(",")])
- inp_f0 = np.array(inp_f0, dtype="float32")
- except:
- traceback.print_exc()
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
- pitch, pitchf = None, None
- if if_f0 == 1:
- pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
- pitch = pitch[:p_len]
- pitchf = pitchf[:p_len]
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
- t2 = ttime()
- times[1] += t2 - t1
- for t in opt_ts:
- t = t // self.window * self.window
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- pitch[:, s // self.window : (t + self.t_pad2) // self.window],
- pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- s = t
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- pitch[:, t // self.window :] if t is not None else pitch,
- pitchf[:, t // self.window :] if t is not None else pitchf,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- audio_opt = np.concatenate(audio_opt)
- del pitch, pitchf, sid
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- return audio_opt
diff --git a/spaces/KonradSzafer/HF-QA-Demo/qa_engine/response.py b/spaces/KonradSzafer/HF-QA-Demo/qa_engine/response.py
deleted file mode 100644
index a1226ff01ee44f1799e1a16a975a7aa00f8851ab..0000000000000000000000000000000000000000
--- a/spaces/KonradSzafer/HF-QA-Demo/qa_engine/response.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from typing import List
-
-
-class Response:
- def __init__(self):
- self.answer = ''
- self.sources = []
-
- def set_answer(self, answer: str) -> None:
- self.answer = answer
-
- def set_sources(self, sources: List) -> None:
- self.sources = list(set(map(str, sources)))
-
- def get_sources(self) -> List[str]:
- return self.sources
-
- def get_sources_as_text(self) -> str:
- if not self.sources:
- return ''
- sources_text = '\n\nSources:'
- for i, (source) in enumerate(self.sources):
- sources_text += f'\n [{i+1}] {source}'
- return sources_text
-
- def get_answer(self, include_sources: bool = False) -> str:
- answer = self.answer
- if include_sources:
- answer += self.get_sources_as_text()
- return answer
-
- def __str__(self):
- return self.get_answer(include_sources=True)
diff --git a/spaces/KyanChen/FunSR/models/cnn_models/transenet.py b/spaces/KyanChen/FunSR/models/cnn_models/transenet.py
deleted file mode 100644
index 0e33e731ac93e819cbdf768ea9c9e6e31daa9ed4..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/FunSR/models/cnn_models/transenet.py
+++ /dev/null
@@ -1,226 +0,0 @@
-from . import common
-
-import torch
-import torch.nn as nn
-from einops import rearrange, repeat
-
-from models import register
-from .transformer import TransformerEncoder, TransformerDecoder
-from argparse import Namespace
-
-MIN_NUM_PATCHES = 12
-
-
-def make_model(args, parent=False):
- return TransENet(args)
-
-
-class BasicModule(nn.Module):
- def __init__(self, conv, n_feat, kernel_size, block_type='basic', bias=True,
- bn=False, act=nn.ReLU(True)):
- super(BasicModule, self).__init__()
-
- self.block_type = block_type
-
- m_body = []
- if block_type == 'basic':
- n_blocks = 10
- m_body = [
- common.BasicBlock(conv, n_feat, n_feat, kernel_size, bias=bias, bn=bn)
- # common.ResBlock(conv, n_feat, kernel_size)
- for _ in range(n_blocks)
- ]
- elif block_type == 'residual':
- n_blocks = 5
- m_body = [
- common.ResBlock(conv, n_feat, kernel_size)
- for _ in range(n_blocks)
- ]
- else:
- print('Error: unsupported block type')
- self.body = nn.Sequential(*m_body)
-
- def forward(self, x):
-
- res = self.body(x)
- if self.block_type == 'basic':
- out = res + x
- elif self.block_type == 'residual':
- out = res
-
- return out
-
-
-@register('TransENet')
-def TransENet(scale_ratio, n_feats=64, rgb_range=1):
- args = Namespace()
- args.n_feats = n_feats
- args.scale = [scale_ratio]
- args.patch_size = 48 * args.scale[0]
-
- args.rgb_range = rgb_range
- args.n_colors = 3
- args.en_depth = 6
- args.de_depth = 1
- return TransENet(args)
-
-
-class TransENet(nn.Module):
-
- def __init__(self, args, conv=common.default_conv):
- super(TransENet, self).__init__()
-
- self.args = args
- self.scale = args.scale[0]
- n_feats = args.n_feats
- kernel_size = 3
- act = nn.ReLU(True)
-
- # rgb_mean = (0.4916, 0.4991, 0.4565) # UCMerced data
- # rgb_std = (1.0, 1.0, 1.0)
- #
- # self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
-
- # define head body
- m_head = [
- conv(args.n_colors, n_feats, kernel_size),
- ]
- self.head = nn.Sequential(*m_head)
-
- # define main body
- self.feat_extrat_stage1 = BasicModule(conv, n_feats, kernel_size, block_type='residual', act=act)
- self.feat_extrat_stage2 = BasicModule(conv, n_feats, kernel_size, block_type='residual', act=act)
- self.feat_extrat_stage3 = BasicModule(conv, n_feats, kernel_size, block_type='residual', act=act)
-
- reduction = 4
- self.stage1_conv1x1 = conv(n_feats, n_feats // reduction, 1)
- self.stage2_conv1x1 = conv(n_feats, n_feats // reduction, 1)
- self.stage3_conv1x1 = conv(n_feats, n_feats // reduction, 1)
- self.up_conv1x1 = conv(n_feats, n_feats // reduction, 1)
- self.span_conv1x1 = conv(n_feats // reduction, n_feats, 1)
-
- self.upsampler = common.Upsampler(conv, self.scale, n_feats, act=False)
-
- # define tail body
- self.tail = conv(n_feats, args.n_colors, kernel_size)
- # self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
-
- # define transformer
- image_size = args.patch_size // self.scale
- patch_size = 4
- dim = 512
- en_depth = args.en_depth
- de_depth = args.de_depth
- heads = 6
- mlp_dim = 512
- channels = n_feats // reduction
- dim_head = 32
- dropout = 0.0
-
- assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
- num_patches = (image_size // patch_size) ** 2
-
- patch_dim = channels * patch_size ** 2
- assert num_patches > MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for attention to be effective (at least 16). Try decreasing your patch size'
-
- self.patch_size = patch_size
- self.patch_to_embedding_low1 = nn.Linear(patch_dim, dim)
- self.patch_to_embedding_low2 = nn.Linear(patch_dim, dim)
- self.patch_to_embedding_low3 = nn.Linear(patch_dim, dim)
- self.patch_to_embedding_high = nn.Linear(patch_dim, dim)
-
- self.embedding_to_patch = nn.Linear(dim, patch_dim)
-
- self.encoder_stage1 = TransformerEncoder(dim, en_depth, heads, dim_head, mlp_dim, dropout)
- self.encoder_stage2 = TransformerEncoder(dim, en_depth, heads, dim_head, mlp_dim, dropout)
- self.encoder_stage3 = TransformerEncoder(dim, en_depth, heads, dim_head, mlp_dim, dropout)
- self.encoder_up = TransformerEncoder(dim, en_depth, heads, dim_head, mlp_dim, dropout)
-
- self.decoder1 = TransformerDecoder(dim, de_depth, heads, dim_head, mlp_dim, dropout)
- self.decoder2 = TransformerDecoder(dim, de_depth, heads, dim_head, mlp_dim, dropout)
- self.decoder3 = TransformerDecoder(dim, de_depth, heads, dim_head, mlp_dim, dropout)
-
-
- def forward(self, x, out_size=None):
-
- # x = self.sub_mean(x)
- x = self.head(x)
-
- # feature extraction part
- feat_stage1 = self.feat_extrat_stage1(x)
- feat_stage2 = self.feat_extrat_stage2(x)
- feat_stage3 = self.feat_extrat_stage3(x)
- feat_ups = self.upsampler(feat_stage3)
-
- feat_stage1 = self.stage1_conv1x1(feat_stage1)
- feat_stage2 = self.stage2_conv1x1(feat_stage2)
- feat_stage3 = self.stage3_conv1x1(feat_stage3)
- feat_ups = self.up_conv1x1(feat_ups)
-
- # transformer part:
- p = self.patch_size
-
- feat_stage1 = rearrange(feat_stage1, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
- feat_stage2 = rearrange(feat_stage2, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
- feat_stage3 = rearrange(feat_stage3, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
- feat_ups = rearrange(feat_ups, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
-
- feat_stage1 = self.patch_to_embedding_low1(feat_stage1)
- feat_stage2 = self.patch_to_embedding_low2(feat_stage2)
- feat_stage3 = self.patch_to_embedding_low3(feat_stage3)
- feat_ups = self.patch_to_embedding_high(feat_ups)
-
- # encoder
- feat_stage1 = self.encoder_stage1(feat_stage1)
- feat_stage2 = self.encoder_stage2(feat_stage2)
- feat_stage3 = self.encoder_stage3(feat_stage3)
- feat_ups = self.encoder_up(feat_ups)
-
- feat_ups = self.decoder3(feat_ups, feat_stage3)
- feat_ups = self.decoder2(feat_ups, feat_stage2)
- feat_ups = self.decoder1(feat_ups, feat_stage1)
-
- feat_ups = self.embedding_to_patch(feat_ups)
- feat_ups = rearrange(feat_ups, 'b (h w) (p1 p2 c) -> b c (h p1) (w p2)', h=self.args.patch_size // p, p1=p, p2=p)
-
- feat_ups = self.span_conv1x1(feat_ups)
-
- x = self.tail(feat_ups)
- # x = self.add_mean(x)
-
- return x
-
- def load_state_dict(self, state_dict, strict=False):
- own_state = self.state_dict()
- for name, param in state_dict.items():
- if name in own_state:
- if isinstance(param, nn.Parameter):
- param = param.data
- try:
- own_state[name].copy_(param)
- except Exception:
- if name.find('tail') >= 0:
- print('Replace pre-trained upsampler to new one...')
- else:
- raise RuntimeError('While copying the parameter named {}, '
- 'whose dimensions in the model are {} and '
- 'whose dimensions in the checkpoint are {}.'
- .format(name, own_state[name].size(), param.size()))
- elif strict:
- if name.find('tail') == -1:
- raise KeyError('unexpected key "{}" in state_dict'
- .format(name))
-
- if strict:
- missing = set(own_state.keys()) - set(state_dict.keys())
- if len(missing) > 0:
- raise KeyError('missing keys in state_dict: "{}"'.format(missing))
-
-
-if __name__ == "__main__":
- from option import args
- model = TransENet(args)
- model.eval()
- input = torch.rand(1, 3, 48, 48)
- sr = model(input)
- print(sr.size())
\ No newline at end of file
diff --git a/spaces/LanguageBind/LanguageBind/languagebind/__init__.py b/spaces/LanguageBind/LanguageBind/languagebind/__init__.py
deleted file mode 100644
index 850acdbe15487a1560362acdaa38b0841ce1ab90..0000000000000000000000000000000000000000
--- a/spaces/LanguageBind/LanguageBind/languagebind/__init__.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import torch
-from torch import nn
-from transformers import AutoConfig
-
-from .image.configuration_image import LanguageBindImageConfig
-from .image.modeling_image import LanguageBindImage
-from .image.tokenization_image import LanguageBindImageTokenizer
-from .image.processing_image import LanguageBindImageProcessor
-
-from .video.configuration_video import LanguageBindVideoConfig
-from .video.modeling_video import LanguageBindVideo
-from .video.tokenization_video import LanguageBindVideoTokenizer
-from .video.processing_video import LanguageBindVideoProcessor
-
-from .depth.configuration_depth import LanguageBindDepthConfig
-from .depth.modeling_depth import LanguageBindDepth
-from .depth.tokenization_depth import LanguageBindDepthTokenizer
-from .depth.processing_depth import LanguageBindDepthProcessor
-
-from .audio.configuration_audio import LanguageBindAudioConfig
-from .audio.modeling_audio import LanguageBindAudio
-from .audio.tokenization_audio import LanguageBindAudioTokenizer
-from .audio.processing_audio import LanguageBindAudioProcessor
-
-from .thermal.configuration_thermal import LanguageBindThermalConfig
-from .thermal.modeling_thermal import LanguageBindThermal
-from .thermal.tokenization_thermal import LanguageBindThermalTokenizer
-from .thermal.processing_thermal import LanguageBindThermalProcessor
-
-
-
-config_dict = {
- 'thermal': LanguageBindThermalConfig,
- 'image': LanguageBindImageConfig,
- 'video': LanguageBindVideoConfig,
- 'depth': LanguageBindDepthConfig,
- 'audio': LanguageBindAudioConfig
-}
-model_dict = {
- 'thermal': LanguageBindThermal,
- 'image': LanguageBindImage,
- 'video': LanguageBindVideo,
- 'depth': LanguageBindDepth,
- 'audio': LanguageBindAudio
-}
-transform_dict = {
- 'video': LanguageBindVideoProcessor,
- 'audio': LanguageBindAudioProcessor,
- 'depth': LanguageBindDepthProcessor,
- 'thermal': LanguageBindThermalProcessor,
- 'image': LanguageBindImageProcessor,
-}
-
-class LanguageBind(nn.Module):
- def __init__(self, clip_type=('thermal', 'image', 'video', 'depth', 'audio'), use_temp=True, cache_dir='./cache_dir'):
- super(LanguageBind, self).__init__()
- self.use_temp = use_temp
- self.modality_encoder = {}
- self.modality_proj = {}
- self.modality_scale = {}
- self.modality_config = {}
- for c in clip_type:
- pretrained_ckpt = f'lb203/LanguageBind_{c.capitalize()}'
- model = model_dict[c].from_pretrained(pretrained_ckpt, cache_dir=cache_dir)
- self.modality_encoder[c] = model.vision_model
- self.modality_proj[c] = model.visual_projection
- self.modality_scale[c] = model.logit_scale
- self.modality_config[c] = model.config
- self.modality_encoder['language'] = model.text_model
- self.modality_proj['language'] = model.text_projection
-
- self.modality_encoder = nn.ModuleDict(self.modality_encoder)
- self.modality_proj = nn.ModuleDict(self.modality_proj)
-
- def forward(self, inputs):
- outputs = {}
- for key, value in inputs.items():
- value = self.modality_encoder[key](**value)[1]
- value = self.modality_proj[key](value)
- value = value / value.norm(p=2, dim=-1, keepdim=True)
- if self.use_temp:
- if key != 'language':
- value = value * self.modality_scale[key].exp()
- outputs[key] = value
- return outputs
-
-def to_device(x, device):
- out_dict = {k: v.to(device) for k, v in x.items()}
- return out_dict
\ No newline at end of file
diff --git a/spaces/LaynzKunz/AI-Cover-Gen-Web-Ui/src/webui.py b/spaces/LaynzKunz/AI-Cover-Gen-Web-Ui/src/webui.py
deleted file mode 100644
index 8b01dfb755e220c7c61e0ed146c232e4d1484e57..0000000000000000000000000000000000000000
--- a/spaces/LaynzKunz/AI-Cover-Gen-Web-Ui/src/webui.py
+++ /dev/null
@@ -1,344 +0,0 @@
-import json
-import os
-import shutil
-import urllib.request
-import zipfile
-from argparse import ArgumentParser
-import pyttsx3
-import gradio as gr
-
-from main import song_cover_pipeline
-import playsound
-
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-mdxnet_models_dir = os.path.join(BASE_DIR, 'mdxnet_models')
-rvc_models_dir = os.path.join(BASE_DIR, 'rvc_models')
-output_dir = os.path.join(BASE_DIR, 'song_output')
-
-
-def get_current_models(models_dir):
- models_list = os.listdir(models_dir)
- items_to_remove = ['hubert_base.pt', 'MODELS.txt', 'public_models.json', 'rmvpe.pt']
- return [item for item in models_list if item not in items_to_remove]
-
-
-def update_models_list():
- models_l = get_current_models(rvc_models_dir)
- return gr.Dropdown.update(choices=models_l)
-
-
-def load_public_models():
- models_table = []
- for model in public_models['voice_models']:
- if not model['name'] in voice_models:
- model = [model['name'], model['description'], model['credit'], model['url'], ', '.join(model['tags'])]
- models_table.append(model)
-
- tags = list(public_models['tags'].keys())
- return gr.DataFrame.update(value=models_table), gr.CheckboxGroup.update(choices=tags)
-
-
-def extract_zip(extraction_folder, zip_name):
- os.makedirs(extraction_folder)
- with zipfile.ZipFile(zip_name, 'r') as zip_ref:
- zip_ref.extractall(extraction_folder)
- os.remove(zip_name)
-
- index_filepath, model_filepath = None, None
- for root, dirs, files in os.walk(extraction_folder):
- for name in files:
- if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
- index_filepath = os.path.join(root, name)
-
- if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
- model_filepath = os.path.join(root, name)
-
- if not model_filepath:
- raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
-
- # move model and index file to extraction folder
- os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
- if index_filepath:
- os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
-
- # remove any unnecessary nested folders
- for filepath in os.listdir(extraction_folder):
- if os.path.isdir(os.path.join(extraction_folder, filepath)):
- shutil.rmtree(os.path.join(extraction_folder, filepath))
-
-
-def download_online_model(url, dir_name, progress=gr.Progress()):
- try:
- progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
- zip_name = url.split('/')[-1]
- extraction_folder = os.path.join(rvc_models_dir, dir_name)
- if os.path.exists(extraction_folder):
- raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
-
- if 'pixeldrain.com' in url:
- url = f'https://pixeldrain.com/api/file/{zip_name}'
-
- urllib.request.urlretrieve(url, zip_name)
-
- progress(0.5, desc='[~] Extracting zip...')
- extract_zip(extraction_folder, zip_name)
- return f'[+] {dir_name} Model successfully downloaded!'
-
- except Exception as e:
- raise gr.Error(str(e))
-
-
-def upload_local_model(zip_path, dir_name, progress=gr.Progress()):
- try:
- extraction_folder = os.path.join(rvc_models_dir, dir_name)
- if os.path.exists(extraction_folder):
- raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
-
- zip_name = zip_path.name
- progress(0.5, desc='[~] Extracting zip...')
- extract_zip(extraction_folder, zip_name)
- return f'[+] {dir_name} Model successfully uploaded!'
-
- except Exception as e:
- raise gr.Error(str(e))
-
-
-def filter_models(tags, query):
- models_table = []
-
- # no filter
- if len(tags) == 0 and len(query) == 0:
- for model in public_models['voice_models']:
- models_table.append([model['name'], model['description'], model['credit'], model['url'], model['tags']])
-
- # filter based on tags and query
- elif len(tags) > 0 and len(query) > 0:
- for model in public_models['voice_models']:
- if all(tag in model['tags'] for tag in tags):
- model_attributes = f"{model['name']} {model['description']} {model['credit']} {' '.join(model['tags'])}".lower()
- if query.lower() in model_attributes:
- models_table.append([model['name'], model['description'], model['credit'], model['url'], model['tags']])
-
- # filter based on only tags
- elif len(tags) > 0:
- for model in public_models['voice_models']:
- if all(tag in model['tags'] for tag in tags):
- models_table.append([model['name'], model['description'], model['credit'], model['url'], model['tags']])
-
- # filter based on only query
- else:
- for model in public_models['voice_models']:
- model_attributes = f"{model['name']} {model['description']} {model['credit']} {' '.join(model['tags'])}".lower()
- if query.lower() in model_attributes:
- models_table.append([model['name'], model['description'], model['credit'], model['url'], model['tags']])
-
- return gr.DataFrame.update(value=models_table)
-
-
-def pub_dl_autofill(pub_models, event: gr.SelectData):
- return gr.Text.update(value=pub_models.loc[event.index[0], 'URL']), gr.Text.update(value=pub_models.loc[event.index[0], 'Model Name'])
-
-
-def swap_visibility():
- return gr.update(visible=True), gr.update(visible=False), gr.update(value=''), gr.update(value=None)
-
-
-def process_file_upload(file):
- return file.name, gr.update(value=file.name)
-
-
-def show_hop_slider(pitch_detection_algo):
- if pitch_detection_algo == 'mangio-crepe':
- return gr.update(visible=True)
- else:
- return gr.update(visible=False)
-
-
-if __name__ == '__main__':
- parser = ArgumentParser(description='Generate an AI cover song in the song_output/id directory.', add_help=True)
- parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
- parser.add_argument("--listen", action="store_true", default=False, help="Make the WebUI reachable from your local network.")
- parser.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
- parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
- args = parser.parse_args()
-
- voice_models = get_current_models(rvc_models_dir)
- with open(os.path.join(rvc_models_dir, 'public_models.json'), encoding='utf8') as infile:
- public_models = json.load(infile)
-
- with gr.Blocks(title='AICoverGenWebUI') as app:
-
- gr.Label('AICoverGen WebUI created with ❤️', show_label=False)
-
- gr.Markdown("AI-Cover-Gen-No-UI [](https://colab.research.google.com/github/ardha27/AICoverGen-NoUI-Colab/blob/main/CoverGen_No_UI.ipynb)")
- gr.Markdown("Duplicate the space for use in private")
- gr.Markdown("[](https://huggingface.co/spaces/r3gm/AICoverGen?duplicate=true)\n\n")
-
- # main tab
-
-def load_custom_model(model_folder_path):
- engine = pyttsx3.init()
- engine.setProperty('rate', 150) # Adjust speech rate (optional)
- engine.setProperty('volume', 1) # Adjust speech volume (optional)
-
- engine.save_to_file('Text to be spoken', 'text.mp3') # Save the speech output to a file
-
- # Load the downloaded TTS model from the 'model_folder_path' directory
- engine.setProperty('voice', model_folder_path)
-
- engine.runAndWait() # Play the speech
-
-# Example Usage
-folder_name_model = 'folder_name'
-load_custom_model(f'rvc_models/{folder_name_model}')
-
- with gr.Tab("Generate"):
-
- with gr.Accordion('Main Options'):
- with gr.Row():
- with gr.Column():
- rvc_model = gr.Dropdown(voice_models, label='Voice Models', info='Models folder "AICoverGen --> rvc_models". After new models are added into this folder, click the refresh button')
- ref_btn = gr.Button('Refresh Models 🔁', variant='primary')
-
- with gr.Column() as yt_link_col:
- song_input = gr.Text(label='Song input', info='Link to a song on YouTube or full path to a local file. For file upload, click the button below. Example: https://www.youtube.com/watch?v=M-mtdN6R3bQ')
- show_file_upload_button = gr.Button('Upload file instead')
-
- with gr.Column(visible=False) as file_upload_col:
- local_file = gr.File(label='Audio file')
- song_input_file = gr.UploadButton('Upload 📂', file_types=['audio'], variant='primary')
- show_yt_link_button = gr.Button('Paste YouTube link/Path to local file instead')
- song_input_file.upload(process_file_upload, inputs=[song_input_file], outputs=[local_file, song_input])
-
- with gr.Column():
- pitch = gr.Slider(-3, 3, value=0, step=1, label='Pitch Change (Vocals ONLY)', info='Generally, use 1 for male to female conversions and -1 for vice-versa. (Octaves)')
- pitch_all = gr.Slider(-12, 12, value=0, step=1, label='Overall Pitch Change', info='Changes pitch/key of vocals and instrumentals together. Altering this slightly reduces sound quality. (Semitones)')
- show_file_upload_button.click(swap_visibility, outputs=[file_upload_col, yt_link_col, song_input, local_file])
- show_yt_link_button.click(swap_visibility, outputs=[yt_link_col, file_upload_col, song_input, local_file])
-
- with gr.Accordion('Voice conversion options', open=False):
- with gr.Row():
- index_rate = gr.Slider(0, 1, value=0.5, label='Index Rate', info="Controls how much of the AI voice's accent to keep in the vocals")
- filter_radius = gr.Slider(0, 7, value=3, step=1, label='Filter radius', info='If >=3: apply median filtering to the harvested pitch results. Can reduce breathiness')
- rms_mix_rate = gr.Slider(0, 1, value=0.25, label='RMS mix rate', info="Control how much to mimic the original vocal's loudness (0) or a fixed loudness (1)")
- protect = gr.Slider(0, 0.5, value=0.33, label='Protect rate', info='Protect voiceless consonants and breath sounds. Set to 0.5 to disable.')
- with gr.Column():
- f0_method = gr.Dropdown(['rmvpe', 'mangio-crepe'], value='rmvpe', label='Pitch detection algorithm', info='Best option is rmvpe (clarity in vocals), then mangio-crepe (smoother vocals)')
- crepe_hop_length = gr.Slider(32, 320, value=128, step=1, visible=False, label='Crepe hop length', info='Lower values lead to longer conversions and a higher risk of voice cracks, but better pitch accuracy.')
- f0_method.change(show_hop_slider, inputs=f0_method, outputs=crepe_hop_length)
- keep_files = gr.Checkbox(label='Keep intermediate files', info='Keep all audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals. Leave unchecked to save space')
-
- with gr.Accordion('Audio mixing options', open=False):
- gr.Markdown('### Volume Change (decibels)')
- with gr.Row():
- main_gain = gr.Slider(-20, 20, value=0, step=1, label='Main Vocals')
- backup_gain = gr.Slider(-20, 20, value=0, step=1, label='Backup Vocals')
- inst_gain = gr.Slider(-20, 20, value=0, step=1, label='Music')
-
- gr.Markdown('### Reverb Control on AI Vocals')
- with gr.Row():
- reverb_rm_size = gr.Slider(0, 1, value=0.15, label='Room size', info='The larger the room, the longer the reverb time')
- reverb_wet = gr.Slider(0, 1, value=0.2, label='Wetness level', info='Level of AI vocals with reverb')
- reverb_dry = gr.Slider(0, 1, value=0.8, label='Dryness level', info='Level of AI vocals without reverb')
- reverb_damping = gr.Slider(0, 1, value=0.7, label='Damping level', info='Absorption of high frequencies in the reverb')
-
- gr.Markdown('### Audio Output Format')
- output_format = gr.Dropdown(['mp3', 'wav'], value='mp3', label='Output file type', info='mp3: small file size, decent quality. wav: Large file size, best quality')
-
- with gr.Row():
- clear_btn = gr.ClearButton(value='Clear', components=[song_input, rvc_model, keep_files, local_file])
- generate_btn = gr.Button("Generate", variant='primary')
- ai_cover = gr.Audio(label='AI Cover', show_share_button=False)
-
- ref_btn.click(update_models_list, None, outputs=rvc_model)
- is_webui = gr.Number(value=1, visible=False)
- generate_btn.click(song_cover_pipeline,
- inputs=[song_input, rvc_model, pitch, keep_files, is_webui, main_gain, backup_gain,
- inst_gain, index_rate, filter_radius, rms_mix_rate, f0_method, crepe_hop_length,
- protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping,
- output_format],
- outputs=[ai_cover])
- clear_btn.click(lambda: [0, 0, 0, 0, 0.5, 3, 0.25, 0.33, 'rmvpe', 128, 0, 0.15, 0.2, 0.8, 0.7, 'mp3', None],
- outputs=[pitch, main_gain, backup_gain, inst_gain, index_rate, filter_radius, rms_mix_rate,
- protect, f0_method, crepe_hop_length, pitch_all, reverb_rm_size, reverb_wet,
- reverb_dry, reverb_damping, output_format, ai_cover])
-
- # Download tab
- with gr.Tab('Download model'):
-
- with gr.Tab('From HuggingFace/Pixeldrain URL'):
- with gr.Row():
- model_zip_link = gr.Text(label='Download link to model', info='Should be a zip file containing a .pth model file and an optional .index file.')
- model_name = gr.Text(label='Name your model', info='Give your new model a unique name, distinct from your other voice models.')
-
- with gr.Row():
- download_btn = gr.Button('Download 🌐', variant='primary', scale=19)
- dl_output_message = gr.Text(label='Output Message', interactive=False, scale=20)
-
- download_btn.click(download_online_model, inputs=[model_zip_link, model_name], outputs=dl_output_message)
-
- gr.Markdown('## Input Examples')
- gr.Examples(
- [
- ['https://huggingface.co/phant0m4r/LiSA/resolve/main/LiSA.zip', 'Lisa'],
- ['https://pixeldrain.com/u/3tJmABXA', 'Gura'],
- ['https://huggingface.co/Kit-Lemonfoot/kitlemonfoot_rvc_models/resolve/main/AZKi%20(Hybrid).zip', 'Azki']
- ],
- [model_zip_link, model_name],
- [],
- download_online_model,
- )
-
- with gr.Tab('From Public Index'):
-
- gr.Markdown('## How to use')
- gr.Markdown('- Click Initialize public models table')
- gr.Markdown('- Filter models using tags or search bar')
- gr.Markdown('- Select a row to autofill the download link and model name')
- gr.Markdown('- Click Download')
-
- with gr.Row():
- pub_zip_link = gr.Text(label='Download link to model')
- pub_model_name = gr.Text(label='Model name')
-
- with gr.Row():
- download_pub_btn = gr.Button('Download 🌐', variant='primary', scale=19)
- pub_dl_output_message = gr.Text(label='Output Message', interactive=False, scale=20)
-
- filter_tags = gr.CheckboxGroup(value=[], label='Show voice models with tags', choices=[])
- search_query = gr.Text(label='Search')
- load_public_models_button = gr.Button(value='Initialize public models table', variant='primary')
-
- public_models_table = gr.DataFrame(value=[], headers=['Model Name', 'Description', 'Credit', 'URL', 'Tags'], label='Available Public Models', interactive=False)
- public_models_table.select(pub_dl_autofill, inputs=[public_models_table], outputs=[pub_zip_link, pub_model_name])
- load_public_models_button.click(load_public_models, outputs=[public_models_table, filter_tags])
- search_query.change(filter_models, inputs=[filter_tags, search_query], outputs=public_models_table)
- filter_tags.change(filter_models, inputs=[filter_tags, search_query], outputs=public_models_table)
- download_pub_btn.click(download_online_model, inputs=[pub_zip_link, pub_model_name], outputs=pub_dl_output_message)
-
- # Upload tab
- with gr.Tab('Upload model'):
- gr.Markdown('## Upload locally trained RVC v2 model and index file')
- gr.Markdown('- Find model file (weights folder) and optional index file (logs/[name] folder)')
- gr.Markdown('- Compress files into zip file')
- gr.Markdown('- Upload zip file and give unique name for voice')
- gr.Markdown('- Click Upload model')
-
- with gr.Row():
- with gr.Column():
- zip_file = gr.File(label='Zip file')
-
- local_model_name = gr.Text(label='Model name')
-
- with gr.Row():
- model_upload_button = gr.Button('Upload model', variant='primary', scale=19)
- local_upload_output_message = gr.Text(label='Output Message', interactive=False, scale=20)
- model_upload_button.click(upload_local_model, inputs=[zip_file, local_model_name], outputs=local_upload_output_message)
-
- app.launch(
- share=args.share_enabled,
- enable_queue=True,
- server_name=None if not args.listen else (args.listen_host or '0.0.0.0'),
- server_port=args.listen_port,
- )
diff --git a/spaces/Lbin123/Lbingo/tests/kblob.ts b/spaces/Lbin123/Lbingo/tests/kblob.ts
deleted file mode 100644
index 9e15b41c1c94a690beb61b23cdb42fc78767ccd2..0000000000000000000000000000000000000000
--- a/spaces/Lbin123/Lbingo/tests/kblob.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import FormData from 'form-data'
-
-import { fetch } from '@/lib/isomorphic'
-
-const formData = new FormData()
-
-const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}}
-
-formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest))
-
-
-fetch('https://bing.vcanbb.top/images/kblob',
- {
- method: 'POST',
- body: formData.getBuffer(),
- headers: {
- "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"",
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": "\"Windows\"",
- "Referer": "https://bing.vcanbb.top/web/index.html",
- "Referrer-Policy": "origin-when-cross-origin",
- ...formData.getHeaders()
- }
-
- }
-).then(res => res.text())
-.then(res => console.log('res', res))
diff --git a/spaces/Lianjd/stock_dashboard/backtrader/feeds/chainer.py b/spaces/Lianjd/stock_dashboard/backtrader/feeds/chainer.py
deleted file mode 100644
index 24b1478e9a0831f1e6b51c881f9cbf5965c0fc87..0000000000000000000000000000000000000000
--- a/spaces/Lianjd/stock_dashboard/backtrader/feeds/chainer.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8; py-indent-offset:4 -*-
-###############################################################################
-#
-# Copyright (C) 2015-2020 Daniel Rodriguez
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-#
-###############################################################################
-from __future__ import (absolute_import, division, print_function,
- unicode_literals)
-
-
-from datetime import datetime
-
-import backtrader as bt
-from backtrader.utils.py3 import range
-
-
-class MetaChainer(bt.DataBase.__class__):
- def __init__(cls, name, bases, dct):
- '''Class has already been created ... register'''
- # Initialize the class
- super(MetaChainer, cls).__init__(name, bases, dct)
-
- def donew(cls, *args, **kwargs):
- '''Intercept constructor to copy timeframe/compression from the 1st data'''
- # Create the object and set the params in place
- _obj, args, kwargs = super(MetaChainer, cls).donew(*args, **kwargs)
-
- if args:
- _obj.p.timeframe = args[0]._timeframe
- _obj.p.compression = args[0]._compression
-
- return _obj, args, kwargs
-
-
-class Chainer(bt.with_metaclass(MetaChainer, bt.DataBase)):
- '''Class that chains datas'''
-
- def islive(self):
- '''Returns ``True`` to notify ``Cerebro`` that preloading and runonce
- should be deactivated'''
- return True
-
- def __init__(self, *args):
- self._args = args
-
- def start(self):
- super(Chainer, self).start()
- for d in self._args:
- d.setenvironment(self._env)
- d._start()
-
- # put the references in a separate list to have pops
- self._ds = list(self._args)
- self._d = self._ds.pop(0) if self._ds else None
- self._lastdt = datetime.min
-
- def stop(self):
- super(Chainer, self).stop()
- for d in self._args:
- d.stop()
-
- def get_notifications(self):
- return [] if self._d is None else self._d.get_notifications()
-
- def _gettz(self):
- '''To be overridden by subclasses which may auto-calculate the
- timezone'''
- if self._args:
- return self._args[0]._gettz()
- return bt.utils.date.Localizer(self.p.tz)
-
- def _load(self):
- while self._d is not None:
- if not self._d.next(): # no values from current data source
- self._d = self._ds.pop(0) if self._ds else None
- continue
-
- # Cannot deliver a date equal to or earlier than one already delivered
- dt = self._d.datetime.datetime()
- if dt <= self._lastdt:
- continue
-
- self._lastdt = dt
-
- for i in range(self._d.size()):
- self.lines[i][0] = self._d.lines[i][0]
-
- return True
-
- # Out of the loop -> self._d is None, no data feed to return from
- return False
diff --git a/spaces/LouisSanna/reco_fish/app.py b/spaces/LouisSanna/reco_fish/app.py
deleted file mode 100644
index a0dff9e7662222fd4278dcef99077e28dbba1e21..0000000000000000000000000000000000000000
--- a/spaces/LouisSanna/reco_fish/app.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import torch
-import gradio as gr
-from PIL import Image
-from torchvision import transforms
-import timm
-
-classes = ['la_badèche', 'la_blennie_coiffée', 'la_blennie_de_caneva', 'la_blennie_de_roux', 'la_blennie_de_zvonimir', 'la_blennie_diabolo', 'la_blennie_gattorugine', 'la_blennie_palmicorne', 'la_blennie_paon', 'la_blennie_pilicorne', 'la_blennie_sphynx', 'la_bogue', 'la_bécune_bouche_jaune', 'la_canthare', 'la_carangue_dentue', 'la_castagnole', 'la_coquette', 'la_dorade_royale', 'la_girelle', 'la_girelle-paon', 'la_gonnelle', 'la_grande_seriole', 'la_grande_vive', 'la_morue', 'la_mostelle_de_roche', 'la_murène_commune', 'la_palomine', 'la_petite_rascasse_rouge', 'la_rascasse_brune', 'la_rascasse_de_madère', 'la_saupe', 'la_sole_commune', 'la_vielle', 'le_baliste_commun', 'le_bar_commun', 'le_barbier_commun', 'le_chabot_buffle', 'le_chabot_commun', 'le_chapon', 'le_congre', 'le_corb', 'le_crénilabre_cendré', 'le_crénilabre_de_melops', 'le_crénilabre_méditerranéen', 'le_crénilabre_ocellé', 'le_crénilabre_tanche', 'le_crénilabre_à_5_taches', 'le_cténolabre', 'le_denté_commun', 'le_dragonnet_lyre', 'le_flet', 'le_gobie_de_sarato', 'le_gobie_marbré', 'le_gobie_nageur', 'le_gobie_noir', 'le_gobie_paganel', 'le_gobie_svelte', 'le_gobie_tacheté', 'le_gobie_varié', 'le_gobie_à_bouche_rouge', 'le_gobie_à_grosse_tête', 'le_gobie_à_tête_jaune', 'le_grondin-perlon', 'le_grondin_camard', 'le_joel', 'le_marbré', 'le_merle', 'le_mordocet', 'le_mulet_labeon', 'le_mulet_lippu', 'le_mérou_brun', 'le_nérophis_lombricoïde', 'le_poisson-lézard_rayé', 'le_poisson-perroquet', 'le_poisson_lapin_à_queue_tronquée', 'le_poisson_lapin_à_ventre_strié', 'le_rason', 'le_rombou', 'le_rouget-barbet_de_roche', 'le_rouget-barbet_de_vase', 'le_sar_commun', 'le_sar_à_grosses_lèvres', 'le_sar_à_museau_pointu', 'le_sar_à_tête_noire', 'le_serran_-chevrette', 'le_serran_ecriture', 'le_siphonostome_atlantique', 'le_sparaillon', 'le_sublet', 'le_syngnathe_aiguille', 'le_tripterygion_rouge', 'le_triptérygion_jaune', 'le_tryptérigion_nain', 'l’anguille', 'l’apogon_commun', 'l’hippocampe_moucheté', 'l’hippocampe_à_nez_court', 'l’hippocampe_à_ramules', 'l’oblade', 'l’orphie', 'l’épinoche']
-
-# Load the model
-model_path = "model.pth"
-model = timm.create_model('resnet34', pretrained=False, num_classes=101)
-model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
-model = model.to('cpu')
-model.eval()
-
-
-# Preprocessing function
-def preprocess(image):
- _preprocess = transforms.Compose([
- transforms.Resize((224, 224)),
- transforms.ToTensor(),
- transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
- ])
- return _preprocess(image)
-
-
-# Inference function
-def fish_classifier(img_input):
- img = Image.fromarray(img_input.astype('uint8'), 'RGB')
- input_tensor = preprocess(img).unsqueeze(0)
-
- with torch.no_grad():
- pred = model(input_tensor)[0]
- pred = torch.nn.functional.softmax(pred, dim=0)
- return {classes[i]: float(pred[i]) for i in range(len(classes))}
-
-
-# Gradio interface
-gr.Interface(
- fn=fish_classifier,
- inputs=gr.Image(shape=(224, 224)),
- outputs=gr.Label(num_top_classes=5),
- examples=["daurade.jpg"]
-).launch()
\ No newline at end of file
diff --git a/spaces/LucasCodeBreak/MusicGen/tests/modules/test_transformer.py b/spaces/LucasCodeBreak/MusicGen/tests/modules/test_transformer.py
deleted file mode 100644
index ff7dfe4c2de05112aec55ddea9c8fd978668f80b..0000000000000000000000000000000000000000
--- a/spaces/LucasCodeBreak/MusicGen/tests/modules/test_transformer.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from itertools import product
-
-import pytest
-import torch
-
-from audiocraft.modules.transformer import (
- StreamingMultiheadAttention, StreamingTransformer, set_efficient_attention_backend)
-
-
-def test_transformer_causal_streaming():
- torch.manual_seed(1234)
-
- for context, custom in product([None, 10], [False, True]):
- # Test that causality and receptive fields are properly handled
- # by looking at the gradients.
- tr = StreamingTransformer(
- 16, 4, 1 if context else 2,
- causal=True, past_context=context, custom=custom,
- dropout=0.)
- steps = 20
- for k in [0, 10, 15, 19]:
- x = torch.randn(4, steps, 16, requires_grad=True)
- y = tr(x)
- y[:, k].abs().sum().backward()
- if k + 1 < steps:
- assert torch.allclose(x.grad[:, k + 1:], torch.tensor(0.)), x.grad[:, k + 1:].norm()
- assert not torch.allclose(x.grad[:, :k + 1], torch.tensor(0.)), x.grad[:, :k + 1].norm()
- if context is not None and k > context:
- limit = k - context - 1
- assert torch.allclose(x.grad[:, :limit],
- torch.tensor(0.)), x.grad[:, :limit].norm()
-
- # Now check that streaming gives the same result as batch eval.
- x = torch.randn(4, steps, 16)
- y = tr(x)
- ys = []
- with tr.streaming():
- for k in range(steps):
- chunk = x[:, k:k + 1, :]
- ys.append(tr(chunk))
- y_stream = torch.cat(ys, dim=1)
- delta = torch.norm(y_stream - y) / torch.norm(y)
- assert delta < 1e-6, delta
-
-
-def test_transformer_vs_pytorch():
- torch.manual_seed(1234)
- # Check that in the non causal setting, we get the same result as
- # PyTorch Transformer encoder.
- for custom in [False, True]:
- tr = StreamingTransformer(
- 16, 4, 2,
- causal=False, custom=custom, dropout=0., positional_scale=0.)
- layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True)
- tr_ref = torch.nn.TransformerEncoder(layer, 2)
- tr.load_state_dict(tr_ref.state_dict())
-
- x = torch.randn(4, 20, 16)
- y = tr(x)
- y2 = tr_ref(x)
- delta = torch.norm(y2 - y) / torch.norm(y)
- assert delta < 1e-6, delta
-
-
-def test_streaming_api():
- tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.)
- tr.eval()
- steps = 12
- x = torch.randn(1, steps, 16)
-
- with torch.no_grad():
- with tr.streaming():
- _ = tr(x[:, :1])
- state = {k: v.clone() for k, v in tr.get_streaming_state().items()}
- y = tr(x[:, 1:2])
- tr.set_streaming_state(state)
- y2 = tr(x[:, 1:2])
- assert torch.allclose(y, y2), (y - y2).norm()
- assert tr.flush() is None
-
-
-def test_memory_efficient():
- for backend in ['torch', 'xformers']:
- torch.manual_seed(1234)
- set_efficient_attention_backend(backend)
-
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., layer_scale=0.1)
- tr_mem_efficient = StreamingTransformer(
- 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1)
- tr_mem_efficient.load_state_dict(tr.state_dict())
- tr.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- with torch.no_grad():
- y = tr(x)
- y2 = tr_mem_efficient(x)
- assert torch.allclose(y, y2), ((y - y2).norm(), backend)
-
-
-def test_attention_as_float32():
- torch.manual_seed(1234)
- cases = [
- {'custom': True},
- {'custom': False},
- ]
- for case in cases:
- tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case)
- tr_float32 = StreamingTransformer(
- 16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case)
- if not case['custom']:
- # we are not using autocast here because it doesn't really
- # work as expected on CPU, so we have to manually cast the weights of the MHA.
- for layer in tr_float32.layers:
- layer.self_attn.mha.to(torch.float32)
- tr_float32.load_state_dict(tr.state_dict())
- steps = 12
- x = torch.randn(3, steps, 16, dtype=torch.bfloat16)
-
- with torch.no_grad():
- y = tr(x)
- y2 = tr_float32(x)
- assert not torch.allclose(y, y2), (y - y2).norm()
-
-
-@torch.no_grad()
-def test_streaming_memory_efficient():
- for backend in ['torch', 'xformers']:
- torch.manual_seed(1234)
- set_efficient_attention_backend(backend)
- tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True)
- tr_mem_efficient = StreamingTransformer(
- 16, 4, 2, dropout=0., memory_efficient=True, causal=True)
- tr.load_state_dict(tr_mem_efficient.state_dict())
- tr.eval()
- tr_mem_efficient.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- ref = tr(x)
-
- with tr_mem_efficient.streaming():
- outs = []
- # frame_sizes = [2] + [1] * (steps - 2)
- frame_sizes = [1] * steps
-
- for frame_size in frame_sizes:
- frame = x[:, :frame_size]
- x = x[:, frame_size:]
- outs.append(tr_mem_efficient(frame))
-
- out = torch.cat(outs, dim=1)
- delta = torch.norm(out - ref) / torch.norm(out)
- assert delta < 1e-6, delta
-
-
-def test_cross_attention():
- torch.manual_seed(1234)
- for norm_first in [True, False]:
- m = StreamingTransformer(
- 16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True)
- m_cross = StreamingTransformer(
- 16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True)
- m_cross.load_state_dict(m.state_dict(), strict=False)
- x = torch.randn(2, 5, 16)
- cross_x = torch.randn(2, 3, 16)
- y_ref = m(x)
- y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x)
- # With norm_first, the two should be exactly the same,
- # but with norm_first=False, we get two normalizations in a row
- # and the epsilon value leads to a tiny change.
- atol = 0. if norm_first else 1e-6
- print((y_ref - y_cross_zero).norm() / y_ref.norm())
- assert torch.allclose(y_ref, y_cross_zero, atol=atol)
-
- # We now expect a difference even with a generous atol of 1e-2.
- y_cross = m_cross(x, cross_attention_src=cross_x)
- assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2)
-
- with pytest.raises(AssertionError):
- _ = m_cross(x)
- _ = m(x, cross_attention_src=cross_x)
-
-
-def test_cross_attention_compat():
- torch.manual_seed(1234)
- num_heads = 2
- dim = num_heads * 64
- with pytest.raises(AssertionError):
- StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True)
-
- cross_attn = StreamingMultiheadAttention(
- dim, num_heads, dropout=0, cross_attention=True, custom=True)
- ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True)
-
- # We can load the regular attention state dict
- # so we have compat when loading old checkpoints.
- cross_attn.load_state_dict(ref_attn.state_dict())
-
- queries = torch.randn(3, 7, dim)
- keys = torch.randn(3, 9, dim)
- values = torch.randn(3, 9, dim)
-
- y = cross_attn(queries, keys, values)[0]
- y_ref = ref_attn(queries, keys, values)[0]
- assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm()
-
- # Now let's check that streaming is working properly.
- with cross_attn.streaming():
- ys = []
- for step in range(queries.shape[1]):
- ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0])
- y_streaming = torch.cat(ys, dim=1)
- assert torch.allclose(y_streaming, y, atol=1e-7)
-
-
-def test_repeat_kv():
- torch.manual_seed(1234)
- num_heads = 8
- kv_repeat = 4
- dim = num_heads * 64
- with pytest.raises(AssertionError):
- mha = StreamingMultiheadAttention(
- dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True)
- mha = StreamingMultiheadAttention(
- dim, num_heads, causal=True, kv_repeat=kv_repeat)
- mha = StreamingMultiheadAttention(
- dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True)
- x = torch.randn(4, 18, dim)
- y = mha(x, x, x)[0]
- assert x.shape == y.shape
-
-
-def test_qk_layer_norm():
- torch.manual_seed(1234)
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False)
- steps = 12
- x = torch.randn(3, steps, 16)
- y = tr(x)
-
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True)
- z = torch.randn(3, 21, 16)
- y = tr(x, cross_attention_src=z)
- assert y.shape == x.shape
diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/data/Create_Bigfile.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/data/Create_Bigfile.py
deleted file mode 100644
index 2df6ef3ceec4a80903410901fd9656d9707af84e..0000000000000000000000000000000000000000
--- a/spaces/MCkernick/Image_Restoration_Colorization/Global/data/Create_Bigfile.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import os
-import struct
-from PIL import Image
-
-IMG_EXTENSIONS = [
- '.jpg', '.JPG', '.jpeg', '.JPEG',
- '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
-]
-
-
-def is_image_file(filename):
- return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
-
-
-def make_dataset(dir):
- images = []
- assert os.path.isdir(dir), '%s is not a valid directory' % dir
-
- for root, _, fnames in sorted(os.walk(dir)):
- for fname in fnames:
- if is_image_file(fname):
- #print(fname)
- path = os.path.join(root, fname)
- images.append(path)
-
- return images
-
-### Modify these 3 lines in your own environment
-indir="/home/ziyuwan/workspace/data/temp_old"
-target_folders=['VOC','Real_L_old','Real_RGB_old']
-out_dir ="/home/ziyuwan/workspace/data/temp_old"
-###
-
-if os.path.exists(out_dir) is False:
- os.makedirs(out_dir)
-
-#
-for target_folder in target_folders:
- curr_indir = os.path.join(indir, target_folder)
- curr_out_file = os.path.join(os.path.join(out_dir, '%s.bigfile'%(target_folder)))
- image_lists = make_dataset(curr_indir)
- image_lists.sort()
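- # Bigfile layout: int32 image count, then for each image an int32 name length, the name bytes, an int32 data length, and the raw image bytes.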
- with open(curr_out_file, 'wb') as wfid:
- # write total image number
- wfid.write(struct.pack('i', len(image_lists)))
- for i, img_path in enumerate(image_lists):
- # write file name first
- img_name = os.path.basename(img_path)
- img_name_bytes = img_name.encode('utf-8')
- wfid.write(struct.pack('i', len(img_name_bytes)))
- wfid.write(img_name_bytes)
- #
- # # write image data in
- with open(img_path, 'rb') as img_fid:
- img_bytes = img_fid.read()
- wfid.write(struct.pack('i', len(img_bytes)))
- wfid.write(img_bytes)
-
- if i % 1000 == 0:
- print('write %d images done' % i)
\ No newline at end of file
diff --git a/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/cleaner.py b/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/cleaner.py
deleted file mode 100644
index 3ba3739816aabbe16663b68c74fcda0588c14bab..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/BangDream-Bert-VITS2/text/cleaner.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from text import chinese, japanese, cleaned_text_to_sequence
-
-
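-# Map language codes to their text front-end modules (normalization and grapheme-to-phoneme).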
-language_module_map = {"ZH": chinese, "JP": japanese}
-
-
-def clean_text(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- return norm_text, phones, tones, word2ph
-
-
-def clean_text_bert(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- bert = language_module.get_bert_feature(norm_text, word2ph)
- return phones, tones, bert
-
-
-def text_to_sequence(text, language):
- norm_text, phones, tones, word2ph = clean_text(text, language)
- return cleaned_text_to_sequence(phones, tones, language)
-
-
-if __name__ == "__main__":
- pass
diff --git a/spaces/MathysL/AutoGPT4/autogpt/commands/git_operations.py b/spaces/MathysL/AutoGPT4/autogpt/commands/git_operations.py
deleted file mode 100644
index 028f3b8da44c85e01d20ccc5d4a5fa72c759008b..0000000000000000000000000000000000000000
--- a/spaces/MathysL/AutoGPT4/autogpt/commands/git_operations.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Git operations for autogpt"""
-import git
-
-from autogpt.config import Config
-from autogpt.workspace import path_in_workspace
-
-CFG = Config()
-
-
-def clone_repository(repo_url: str, clone_path: str) -> str:
- """Clone a GitHub repository locally
-
- Args:
- repo_url (str): The URL of the repository to clone
- clone_path (str): The path to clone the repository to
-
- Returns:
- str: The result of the clone operation"""
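- # Inject the configured GitHub username and API key into the repo URL so the clone is authenticated.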
- split_url = repo_url.split("//")
- auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
- safe_clone_path = path_in_workspace(clone_path)
- try:
- git.Repo.clone_from(auth_repo_url, safe_clone_path)
- return f"""Cloned {repo_url} to {safe_clone_path}"""
- except Exception as e:
- return f"Error: {str(e)}"
diff --git a/spaces/MercurialAi/Embeddings_Chat/README.md b/spaces/MercurialAi/Embeddings_Chat/README.md
deleted file mode 100644
index 601b594d7c7f4e5def8bd59478a9520565d28045..0000000000000000000000000000000000000000
--- a/spaces/MercurialAi/Embeddings_Chat/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Embeddings Chat
-emoji: 🩺
-colorFrom: purple
-colorTo: blue
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/ddpm_original.py b/spaces/MirageML/sjc/sd1/ldm/models/diffusion/ddpm_original.py
deleted file mode 100644
index bbedd04cfd6f736ac066434a75618b9ba5125be7..0000000000000000000000000000000000000000
--- a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/ddpm_original.py
+++ /dev/null
@@ -1,1445 +0,0 @@
-"""
-wild mixture of
-https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
-https://github.com/CompVis/taming-transformers
--- merci
-"""
-
-import torch
-import torch.nn as nn
-import numpy as np
-import pytorch_lightning as pl
-from torch.optim.lr_scheduler import LambdaLR
-from einops import rearrange, repeat
-from contextlib import contextmanager
-from functools import partial
-from tqdm import tqdm
-from torchvision.utils import make_grid
-from pytorch_lightning.utilities.distributed import rank_zero_only
-
-from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
-from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
-from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from ldm.models.diffusion.ddim import DDIMSampler
-
-
-__conditioning_keys__ = {'concat': 'c_concat',
- 'crossattn': 'c_crossattn',
- 'adm': 'y'}
-
-
-def disabled_train(self, mode=True):
- """Overwrite model.train with this function to make sure train/eval mode
- does not change anymore."""
- return self
-
-
-def uniform_on_device(r1, r2, shape, device):
- return (r1 - r2) * torch.rand(*shape, device=device) + r2
-
-
-class DDPM(pl.LightningModule):
- # classic DDPM with Gaussian diffusion, in image space
- def __init__(self,
- unet_config,
- timesteps=1000,
- beta_schedule="linear",
- loss_type="l2",
- ckpt_path=None,
- ignore_keys=[],
- load_only_unet=False,
- monitor="val/loss",
- use_ema=True,
- first_stage_key="image",
- image_size=256,
- channels=3,
- log_every_t=100,
- clip_denoised=True,
- linear_start=1e-4,
- linear_end=2e-2,
- cosine_s=8e-3,
- given_betas=None,
- original_elbo_weight=0.,
- v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
- l_simple_weight=1.,
- conditioning_key=None,
- parameterization="eps", # all assuming fixed variance schedules
- scheduler_config=None,
- use_positional_encodings=False,
- learn_logvar=False,
- logvar_init=0.,
- ):
- super().__init__()
- assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
- self.parameterization = parameterization
- print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
- self.cond_stage_model = None
- self.clip_denoised = clip_denoised
- self.log_every_t = log_every_t
- self.first_stage_key = first_stage_key
- self.image_size = image_size # try conv?
- self.channels = channels
- self.use_positional_encodings = use_positional_encodings
- self.model = DiffusionWrapper(unet_config, conditioning_key)
- count_params(self.model, verbose=True)
- self.use_ema = use_ema
- if self.use_ema:
- self.model_ema = LitEma(self.model)
- print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
- self.use_scheduler = scheduler_config is not None
- if self.use_scheduler:
- self.scheduler_config = scheduler_config
-
- self.v_posterior = v_posterior
- self.original_elbo_weight = original_elbo_weight
- self.l_simple_weight = l_simple_weight
-
- if monitor is not None:
- self.monitor = monitor
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
-
- self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
- linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
-
- self.loss_type = loss_type
-
- self.learn_logvar = learn_logvar
- self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
- if self.learn_logvar:
- self.logvar = nn.Parameter(self.logvar, requires_grad=True)
-
-
- def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- if exists(given_betas):
- betas = given_betas
- else:
- betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
- cosine_s=cosine_s)
- alphas = 1. - betas
- alphas_cumprod = np.cumprod(alphas, axis=0)
- alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
- timesteps, = betas.shape
- self.num_timesteps = int(timesteps)
- self.linear_start = linear_start
- self.linear_end = linear_end
- assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
-
- to_torch = partial(torch.tensor, dtype=torch.float32)
-
- self.register_buffer('betas', to_torch(betas))
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
- self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
- # calculations for posterior q(x_{t-1} | x_t, x_0)
- posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
- 1. - alphas_cumprod) + self.v_posterior * betas
- # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
- self.register_buffer('posterior_variance', to_torch(posterior_variance))
- # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
- self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
- self.register_buffer('posterior_mean_coef1', to_torch(
- betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
- self.register_buffer('posterior_mean_coef2', to_torch(
- (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
-
- if self.parameterization == "eps":
- lvlb_weights = self.betas ** 2 / (
- 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
- elif self.parameterization == "x0":
- lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
- else:
- raise NotImplementedError("mu not supported")
- # TODO how to choose this term
- lvlb_weights[0] = lvlb_weights[1]
- self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
- assert not torch.isnan(self.lvlb_weights).all()
-
- @contextmanager
- def ema_scope(self, context=None):
- if self.use_ema:
- self.model_ema.store(self.model.parameters())
- self.model_ema.copy_to(self.model)
- if context is not None:
- print(f"{context}: Switched to EMA weights")
- try:
- yield None
- finally:
- if self.use_ema:
- self.model_ema.restore(self.model.parameters())
- if context is not None:
- print(f"{context}: Restored training weights")
-
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
- sd = torch.load(path, map_location="cpu")
- if "state_dict" in list(sd.keys()):
- sd = sd["state_dict"]
- keys = list(sd.keys())
- for k in keys:
- for ik in ignore_keys:
- if k.startswith(ik):
- print("Deleting key {} from state_dict.".format(k))
- del sd[k]
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
- sd, strict=False)
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
- if len(missing) > 0:
- print(f"Missing Keys: {missing}")
- if len(unexpected) > 0:
- print(f"Unexpected Keys: {unexpected}")
-
- def q_mean_variance(self, x_start, t):
- """
- Get the distribution q(x_t | x_0).
- :param x_start: the [N x C x ...] tensor of noiseless inputs.
- :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
- :return: A tuple (mean, variance, log_variance), all of x_start's shape.
- """
- mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
- variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
- log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
- return mean, variance, log_variance
-
- def predict_start_from_noise(self, x_t, t, noise):
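- # Invert the forward process: x_0 = sqrt(1 / alphas_cumprod_t) * x_t - sqrt(1 / alphas_cumprod_t - 1) * noise.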
- return (
- extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
- )
-
- def q_posterior(self, x_start, x_t, t):
- posterior_mean = (
- extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
- extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
- )
- posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
- posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
- return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
- def p_mean_variance(self, x, t, clip_denoised: bool):
- model_out = self.model(x, t)
- if self.parameterization == "eps":
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
- elif self.parameterization == "x0":
- x_recon = model_out
- if clip_denoised:
- x_recon.clamp_(-1., 1.)
-
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
- return model_mean, posterior_variance, posterior_log_variance
-
- @torch.no_grad()
- def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
- b, *_, device = *x.shape, x.device
- model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
- noise = noise_like(x.shape, device, repeat_noise)
- # no noise when t == 0
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
- @torch.no_grad()
- def p_sample_loop(self, shape, return_intermediates=False):
- device = self.betas.device
- b = shape[0]
- img = torch.randn(shape, device=device)
- intermediates = [img]
- for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
- img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
- clip_denoised=self.clip_denoised)
- if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
- intermediates.append(img)
- if return_intermediates:
- return img, intermediates
- return img
-
- @torch.no_grad()
- def sample(self, batch_size=16, return_intermediates=False):
- image_size = self.image_size
- channels = self.channels
- return self.p_sample_loop((batch_size, channels, image_size, image_size),
- return_intermediates=return_intermediates)
-
- def q_sample(self, x_start, t, noise=None):
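- # Forward diffusion q(x_t | x_0): x_t = sqrt(alphas_cumprod_t) * x_0 + sqrt(1 - alphas_cumprod_t) * noise.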
- noise = default(noise, lambda: torch.randn_like(x_start))
- return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
-
- def get_loss(self, pred, target, mean=True):
- if self.loss_type == 'l1':
- loss = (target - pred).abs()
- if mean:
- loss = loss.mean()
- elif self.loss_type == 'l2':
- if mean:
- loss = torch.nn.functional.mse_loss(target, pred)
- else:
- loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
- else:
- raise NotImplementedError("unknown loss type '{loss_type}'")
-
- return loss
-
- def p_losses(self, x_start, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- model_out = self.model(x_noisy, t)
-
- loss_dict = {}
- if self.parameterization == "eps":
- target = noise
- elif self.parameterization == "x0":
- target = x_start
- else:
- raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
-
- loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
-
- log_prefix = 'train' if self.training else 'val'
-
- loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
- loss_simple = loss.mean() * self.l_simple_weight
-
- loss_vlb = (self.lvlb_weights[t] * loss).mean()
- loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
-
- loss = loss_simple + self.original_elbo_weight * loss_vlb
-
- loss_dict.update({f'{log_prefix}/loss': loss})
-
- return loss, loss_dict
-
- def forward(self, x, *args, **kwargs):
- # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
- # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
- return self.p_losses(x, t, *args, **kwargs)
-
- def get_input(self, batch, k):
- x = batch[k]
- if len(x.shape) == 3:
- x = x[..., None]
- x = rearrange(x, 'b h w c -> b c h w')
- x = x.to(memory_format=torch.contiguous_format).float()
- return x
-
- def shared_step(self, batch):
- x = self.get_input(batch, self.first_stage_key)
- loss, loss_dict = self(x)
- return loss, loss_dict
-
- def training_step(self, batch, batch_idx):
- loss, loss_dict = self.shared_step(batch)
-
- self.log_dict(loss_dict, prog_bar=True,
- logger=True, on_step=True, on_epoch=True)
-
- self.log("global_step", self.global_step,
- prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
- if self.use_scheduler:
- lr = self.optimizers().param_groups[0]['lr']
- self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
- return loss
-
- @torch.no_grad()
- def validation_step(self, batch, batch_idx):
- _, loss_dict_no_ema = self.shared_step(batch)
- with self.ema_scope():
- _, loss_dict_ema = self.shared_step(batch)
- loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
- self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
- self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
-
- def on_train_batch_end(self, *args, **kwargs):
- if self.use_ema:
- self.model_ema(self.model)
-
- def _get_rows_from_list(self, samples):
- n_imgs_per_row = len(samples)
- denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
- return denoise_grid
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
- log = dict()
- x = self.get_input(batch, self.first_stage_key)
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- x = x.to(self.device)[:N]
- log["inputs"] = x
-
- # get diffusion row
- diffusion_row = list()
- x_start = x[:n_row]
-
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(x_start)
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- diffusion_row.append(x_noisy)
-
- log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
-
- if sample:
- # get denoise row
- with self.ema_scope("Plotting"):
- samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
-
- log["samples"] = samples
- log["denoise_row"] = self._get_rows_from_list(denoise_row)
-
- if return_keys:
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
- return log
- else:
- return {key: log[key] for key in return_keys}
- return log
-
- def configure_optimizers(self):
- lr = self.learning_rate
- params = list(self.model.parameters())
- if self.learn_logvar:
- params = params + [self.logvar]
- opt = torch.optim.AdamW(params, lr=lr)
- return opt
-
-
-class LatentDiffusion(DDPM):
- """main class"""
- def __init__(self,
- first_stage_config,
- cond_stage_config,
- num_timesteps_cond=None,
- cond_stage_key="image",
- cond_stage_trainable=False,
- concat_mode=True,
- cond_stage_forward=None,
- conditioning_key=None,
- scale_factor=1.0,
- scale_by_std=False,
- *args, **kwargs):
- self.num_timesteps_cond = default(num_timesteps_cond, 1)
- self.scale_by_std = scale_by_std
- assert self.num_timesteps_cond <= kwargs['timesteps']
- # for backwards compatibility after implementation of DiffusionWrapper
- if conditioning_key is None:
- conditioning_key = 'concat' if concat_mode else 'crossattn'
- if cond_stage_config == '__is_unconditional__':
- conditioning_key = None
- ckpt_path = kwargs.pop("ckpt_path", None)
- ignore_keys = kwargs.pop("ignore_keys", [])
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
- self.concat_mode = concat_mode
- self.cond_stage_trainable = cond_stage_trainable
- self.cond_stage_key = cond_stage_key
- try:
- self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
- except:
- self.num_downs = 0
- if not scale_by_std:
- self.scale_factor = scale_factor
- else:
- self.register_buffer('scale_factor', torch.tensor(scale_factor))
- self.instantiate_first_stage(first_stage_config)
- self.instantiate_cond_stage(cond_stage_config)
- self.cond_stage_forward = cond_stage_forward
- self.clip_denoised = False
- self.bbox_tokenizer = None
-
- self.restarted_from_ckpt = False
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys)
- self.restarted_from_ckpt = True
-
- def make_cond_schedule(self, ):
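- # The first num_timesteps_cond entries get evenly spaced conditioning ids over the full schedule; the rest stay pinned to the last timestep.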
- self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
- ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
- self.cond_ids[:self.num_timesteps_cond] = ids
-
- @rank_zero_only
- @torch.no_grad()
- def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
- # only for very first batch
- if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
- assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
- # set rescale weight to 1./std of encodings
- print("### USING STD-RESCALING ###")
- x = super().get_input(batch, self.first_stage_key)
- x = x.to(self.device)
- encoder_posterior = self.encode_first_stage(x)
- z = self.get_first_stage_encoding(encoder_posterior).detach()
- del self.scale_factor
- self.register_buffer('scale_factor', 1. / z.flatten().std())
- print(f"setting self.scale_factor to {self.scale_factor}")
- print("### USING STD-RESCALING ###")
-
- def register_schedule(self,
- given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
-
- self.shorten_cond_schedule = self.num_timesteps_cond > 1
- if self.shorten_cond_schedule:
- self.make_cond_schedule()
-
- def instantiate_first_stage(self, config):
- model = instantiate_from_config(config)
- self.first_stage_model = model.eval()
- self.first_stage_model.train = disabled_train
- for param in self.first_stage_model.parameters():
- param.requires_grad = False
-
- def instantiate_cond_stage(self, config):
- if not self.cond_stage_trainable:
- if config == "__is_first_stage__":
- print("Using first stage also as cond stage.")
- self.cond_stage_model = self.first_stage_model
- elif config == "__is_unconditional__":
- print(f"Training {self.__class__.__name__} as an unconditional model.")
- self.cond_stage_model = None
- # self.be_unconditional = True
- else:
- model = instantiate_from_config(config)
- self.cond_stage_model = model.eval()
- self.cond_stage_model.train = disabled_train
- for param in self.cond_stage_model.parameters():
- param.requires_grad = False
- else:
- assert config != '__is_first_stage__'
- assert config != '__is_unconditional__'
- model = instantiate_from_config(config)
- self.cond_stage_model = model
-
- def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
- denoise_row = []
- for zd in tqdm(samples, desc=desc):
- denoise_row.append(self.decode_first_stage(zd.to(self.device),
- force_not_quantize=force_no_decoder_quantization))
- n_imgs_per_row = len(denoise_row)
- denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
- denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
- return denoise_grid
-
- def get_first_stage_encoding(self, encoder_posterior):
- if isinstance(encoder_posterior, DiagonalGaussianDistribution):
- z = encoder_posterior.sample()
- elif isinstance(encoder_posterior, torch.Tensor):
- z = encoder_posterior
- else:
- raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
- return self.scale_factor * z
-
- def get_learned_conditioning(self, c):
- if self.cond_stage_forward is None:
- if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
- c = self.cond_stage_model.encode(c)
- if isinstance(c, DiagonalGaussianDistribution):
- c = c.mode()
- else:
- c = self.cond_stage_model(c)
- else:
- assert hasattr(self.cond_stage_model, self.cond_stage_forward)
- c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
- return c
-
- def meshgrid(self, h, w):
- y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
- x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
-
- arr = torch.cat([y, x], dim=-1)
- return arr
-
- def delta_border(self, h, w):
- """
- :param h: height
- :param w: width
- :return: normalized distance to the image border,
- with min distance = 0 at the border and max dist = 0.5 at the image center
- """
- lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
- arr = self.meshgrid(h, w) / lower_right_corner
- dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
- dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
- edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
- return edge_dist
-
- def get_weighting(self, h, w, Ly, Lx, device):
- weighting = self.delta_border(h, w)
- weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
- self.split_input_params["clip_max_weight"], )
- weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
-
- if self.split_input_params["tie_braker"]:
- L_weighting = self.delta_border(Ly, Lx)
- L_weighting = torch.clip(L_weighting,
- self.split_input_params["clip_min_tie_weight"],
- self.split_input_params["clip_max_tie_weight"])
-
- L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
- weighting = weighting * L_weighting
- return weighting
-
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
- """
- :param x: img of size (bs, c, h, w)
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
- """
- bs, nc, h, w = x.shape
-
- # number of crops in image
- Ly = (h - kernel_size[0]) // stride[0] + 1
- Lx = (w - kernel_size[1]) // stride[1] + 1
-
- if uf == 1 and df == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
-
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
-
- elif uf > 1 and df == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
- dilation=1, padding=0,
- stride=(stride[0] * uf, stride[1] * uf))
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
-
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
-
- elif df > 1 and uf == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
- dilation=1, padding=0,
- stride=(stride[0] // df, stride[1] // df))
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
-
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
-
- else:
- raise NotImplementedError
-
- return fold, unfold, normalization, weighting
-
- @torch.no_grad()
- def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
- cond_key=None, return_original_cond=False, bs=None):
- x = super().get_input(batch, k)
- if bs is not None:
- x = x[:bs]
- x = x.to(self.device)
- encoder_posterior = self.encode_first_stage(x)
- z = self.get_first_stage_encoding(encoder_posterior).detach()
-
- if self.model.conditioning_key is not None:
- if cond_key is None:
- cond_key = self.cond_stage_key
- if cond_key != self.first_stage_key:
- if cond_key in ['caption', 'coordinates_bbox']:
- xc = batch[cond_key]
- elif cond_key == 'class_label':
- xc = batch
- else:
- xc = super().get_input(batch, cond_key).to(self.device)
- else:
- xc = x
- if not self.cond_stage_trainable or force_c_encode:
- if isinstance(xc, dict) or isinstance(xc, list):
- # import pudb; pudb.set_trace()
- c = self.get_learned_conditioning(xc)
- else:
- c = self.get_learned_conditioning(xc.to(self.device))
- else:
- c = xc
- if bs is not None:
- c = c[:bs]
-
- if self.use_positional_encodings:
- pos_x, pos_y = self.compute_latent_shifts(batch)
- ckey = __conditioning_keys__[self.model.conditioning_key]
- c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
-
- else:
- c = None
- xc = None
- if self.use_positional_encodings:
- pos_x, pos_y = self.compute_latent_shifts(batch)
- c = {'pos_x': pos_x, 'pos_y': pos_y}
- out = [z, c]
- if return_first_stage_outputs:
- xrec = self.decode_first_stage(z)
- out.extend([x, xrec])
- if return_original_cond:
- out.append(xc)
- return out
-
- @torch.no_grad()
- def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
- if predict_cids:
- if z.dim() == 4:
- z = torch.argmax(z.exp(), dim=1).long()
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
- z = 1. / self.scale_factor * z
-
- if hasattr(self, "split_input_params"):
- if self.split_input_params["patch_distributed_vq"]:
- ks = self.split_input_params["ks"] # eg. (128, 128)
- stride = self.split_input_params["stride"] # eg. (64, 64)
- uf = self.split_input_params["vqf"]
- bs, nc, h, w = z.shape
- if ks[0] > h or ks[1] > w:
- ks = (min(ks[0], h), min(ks[1], w))
- print("reducing Kernel")
-
- if stride[0] > h or stride[1] > w:
- stride = (min(stride[0], h), min(stride[1], w))
- print("reducing stride")
-
- fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
-
- z = unfold(z) # (bn, nc * prod(**ks), L)
- # 1. Reshape to img shape
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
-
- # 2. apply model loop over last dim
- if isinstance(self.first_stage_model, VQModelInterface):
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
- force_not_quantize=predict_cids or force_not_quantize)
- for i in range(z.shape[-1])]
- else:
-
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
- for i in range(z.shape[-1])]
-
- o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
- o = o * weighting
- # Reverse 1. reshape to img shape
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
- # stitch crops together
- decoded = fold(o)
- decoded = decoded / normalization # norm is shape (1, 1, h, w)
- return decoded
- else:
- if isinstance(self.first_stage_model, VQModelInterface):
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
- else:
- return self.first_stage_model.decode(z)
-
- else:
- if isinstance(self.first_stage_model, VQModelInterface):
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
- else:
- return self.first_stage_model.decode(z)
-
- # same as above but without decorator
- def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
- if predict_cids:
- if z.dim() == 4:
- z = torch.argmax(z.exp(), dim=1).long()
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
- z = 1. / self.scale_factor * z
-
- if hasattr(self, "split_input_params"):
- if self.split_input_params["patch_distributed_vq"]:
- ks = self.split_input_params["ks"] # eg. (128, 128)
- stride = self.split_input_params["stride"] # eg. (64, 64)
- uf = self.split_input_params["vqf"]
- bs, nc, h, w = z.shape
- if ks[0] > h or ks[1] > w:
- ks = (min(ks[0], h), min(ks[1], w))
- print("reducing Kernel")
-
- if stride[0] > h or stride[1] > w:
- stride = (min(stride[0], h), min(stride[1], w))
- print("reducing stride")
-
- fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
-
- z = unfold(z) # (bn, nc * prod(**ks), L)
- # 1. Reshape to img shape
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
-
- # 2. apply model loop over last dim
- if isinstance(self.first_stage_model, VQModelInterface):
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
- force_not_quantize=predict_cids or force_not_quantize)
- for i in range(z.shape[-1])]
- else:
-
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
- for i in range(z.shape[-1])]
-
- o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
- o = o * weighting
- # Reverse 1. reshape to img shape
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
- # stitch crops together
- decoded = fold(o)
- decoded = decoded / normalization # norm is shape (1, 1, h, w)
- return decoded
- else:
- if isinstance(self.first_stage_model, VQModelInterface):
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
- else:
- return self.first_stage_model.decode(z)
-
- else:
- if isinstance(self.first_stage_model, VQModelInterface):
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
- else:
- return self.first_stage_model.decode(z)
-
- @torch.no_grad()
- def encode_first_stage(self, x):
- if hasattr(self, "split_input_params"):
- if self.split_input_params["patch_distributed_vq"]:
- ks = self.split_input_params["ks"] # eg. (128, 128)
- stride = self.split_input_params["stride"] # eg. (64, 64)
- df = self.split_input_params["vqf"]
- self.split_input_params['original_image_size'] = x.shape[-2:]
- bs, nc, h, w = x.shape
- if ks[0] > h or ks[1] > w:
- ks = (min(ks[0], h), min(ks[1], w))
- print("reducing Kernel")
-
- if stride[0] > h or stride[1] > w:
- stride = (min(stride[0], h), min(stride[1], w))
- print("reducing stride")
-
- fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
- z = unfold(x) # (bn, nc * prod(**ks), L)
- # Reshape to img shape
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
-
- output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
- for i in range(z.shape[-1])]
-
- o = torch.stack(output_list, axis=-1)
- o = o * weighting
-
- # Reverse reshape to img shape
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
- # stitch crops together
- decoded = fold(o)
- decoded = decoded / normalization
- return decoded
-
- else:
- return self.first_stage_model.encode(x)
- else:
- return self.first_stage_model.encode(x)
-
- def shared_step(self, batch, **kwargs):
- x, c = self.get_input(batch, self.first_stage_key)
- loss = self(x, c)
- return loss
-
- def forward(self, x, c, *args, **kwargs):
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
- if self.model.conditioning_key is not None:
- assert c is not None
- if self.cond_stage_trainable:
- c = self.get_learned_conditioning(c)
- if self.shorten_cond_schedule: # TODO: drop this option
- tc = self.cond_ids[t].to(self.device)
- c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
- return self.p_losses(x, c, t, *args, **kwargs)
-
- def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
- def rescale_bbox(bbox):
- x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
- y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
- w = min(bbox[2] / crop_coordinates[2], 1 - x0)
- h = min(bbox[3] / crop_coordinates[3], 1 - y0)
- return x0, y0, w, h
-
- return [rescale_bbox(b) for b in bboxes]
-
- def apply_model(self, x_noisy, t, cond, return_ids=False):
-
- if isinstance(cond, dict):
- # hybrid case, cond is expected to be a dict
- pass
- else:
- if not isinstance(cond, list):
- cond = [cond]
- key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
- cond = {key: cond}
-
- if hasattr(self, "split_input_params"):
- assert len(cond) == 1 # todo can only deal with one conditioning atm
- assert not return_ids
- ks = self.split_input_params["ks"] # eg. (128, 128)
- stride = self.split_input_params["stride"] # eg. (64, 64)
-
- h, w = x_noisy.shape[-2:]
-
- fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
-
- z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
- # Reshape to img shape
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
- z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
-
- if self.cond_stage_key in ["image", "LR_image", "segmentation",
- 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
- c_key = next(iter(cond.keys())) # get key
- c = next(iter(cond.values())) # get value
- assert (len(c) == 1) # todo extend to list with more than one elem
- c = c[0] # get element
-
- c = unfold(c)
- c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
-
- cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
-
- elif self.cond_stage_key == 'coordinates_bbox':
- assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
-
- # assuming padding of unfold is always 0 and its dilation is always 1
- n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
- full_img_h, full_img_w = self.split_input_params['original_image_size']
- # as we are operating on latents, we need the factor from the original image size to the
- # spatial latent size to properly rescale the crops for regenerating the bbox annotations
- num_downs = self.first_stage_model.encoder.num_resolutions - 1
- rescale_latent = 2 ** (num_downs)
-
- # get top-left positions of patches as expected by the bbox tokenizer; therefore we
- # need to rescale the tl patch coordinates to be in between (0, 1)
- tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
- rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
- for patch_nr in range(z.shape[-1])]
-
- # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
- patch_limits = [(x_tl, y_tl,
- rescale_latent * ks[0] / full_img_w,
- rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
- # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
-
- # tokenize crop coordinates for the bounding boxes of the respective patches
- patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
- for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
- print(patch_limits_tknzd[0].shape)
- # cut tknzd crop position from conditioning
- assert isinstance(cond, dict), 'cond must be dict to be fed into model'
- cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
- print(cut_cond.shape)
-
- adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
- adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
- print(adapted_cond.shape)
- adapted_cond = self.get_learned_conditioning(adapted_cond)
- print(adapted_cond.shape)
- adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
- print(adapted_cond.shape)
-
- cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
-
- else:
- cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
-
- # apply model by loop over crops
- output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
- assert not isinstance(output_list[0],
- tuple) # todo: can't deal with multiple model outputs; check this never happens
-
- o = torch.stack(output_list, axis=-1)
- o = o * weighting
- # Reverse reshape to img shape
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
- # stitch crops together
- x_recon = fold(o) / normalization
-
- else:
- x_recon = self.model(x_noisy, t, **cond)
-
- if isinstance(x_recon, tuple) and not return_ids:
- return x_recon[0]
- else:
- return x_recon
-
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
- return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
- def _prior_bpd(self, x_start):
- """
- Get the prior KL term for the variational lower-bound, measured in
- bits-per-dim.
- This term can't be optimized, as it only depends on the encoder.
- :param x_start: the [N x C x ...] tensor of inputs.
- :return: a batch of [N] KL values (in bits), one per batch element.
- """
- batch_size = x_start.shape[0]
- t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
- kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
- return mean_flat(kl_prior) / np.log(2.0)
-
- def p_losses(self, x_start, cond, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- model_output = self.apply_model(x_noisy, t, cond)
-
- loss_dict = {}
- prefix = 'train' if self.training else 'val'
-
- if self.parameterization == "x0":
- target = x_start
- elif self.parameterization == "eps":
- target = noise
- else:
- raise NotImplementedError()
-
- loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
- loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
-
- logvar_t = self.logvar[t].to(self.device)
- loss = loss_simple / torch.exp(logvar_t) + logvar_t
- # loss = loss_simple / torch.exp(self.logvar) + self.logvar
- if self.learn_logvar:
- loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
- loss_dict.update({'logvar': self.logvar.data.mean()})
-
- loss = self.l_simple_weight * loss.mean()
-
- loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
- loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
- loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
- loss += (self.original_elbo_weight * loss_vlb)
- loss_dict.update({f'{prefix}/loss': loss})
-
- return loss, loss_dict
-
- def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
- return_x0=False, score_corrector=None, corrector_kwargs=None):
- t_in = t
- model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
-
- if score_corrector is not None:
- assert self.parameterization == "eps"
- model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
-
- if return_codebook_ids:
- model_out, logits = model_out
-
- if self.parameterization == "eps":
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
- elif self.parameterization == "x0":
- x_recon = model_out
- else:
- raise NotImplementedError()
-
- if clip_denoised:
- x_recon.clamp_(-1., 1.)
- if quantize_denoised:
- x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
- if return_codebook_ids:
- return model_mean, posterior_variance, posterior_log_variance, logits
- elif return_x0:
- return model_mean, posterior_variance, posterior_log_variance, x_recon
- else:
- return model_mean, posterior_variance, posterior_log_variance
-
- @torch.no_grad()
- def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
- return_codebook_ids=False, quantize_denoised=False, return_x0=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
- b, *_, device = *x.shape, x.device
- outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
- return_codebook_ids=return_codebook_ids,
- quantize_denoised=quantize_denoised,
- return_x0=return_x0,
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
- if return_codebook_ids:
- raise DeprecationWarning("Support dropped.")
- model_mean, _, model_log_variance, logits = outputs
- elif return_x0:
- model_mean, _, model_log_variance, x0 = outputs
- else:
- model_mean, _, model_log_variance = outputs
-
- noise = noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- # no noise when t == 0
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-
- if return_codebook_ids:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
- if return_x0:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
- else:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
- @torch.no_grad()
- def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
- img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
- score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
- log_every_t=None):
- if not log_every_t:
- log_every_t = self.log_every_t
- timesteps = self.num_timesteps
- if batch_size is not None:
- b = batch_size if batch_size is not None else shape[0]
- shape = [batch_size] + list(shape)
- else:
- b = batch_size = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=self.device)
- else:
- img = x_T
- intermediates = []
- if cond is not None:
- if isinstance(cond, dict):
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
- else:
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-
- if start_T is not None:
- timesteps = min(timesteps, start_T)
- iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
- total=timesteps) if verbose else reversed(
- range(0, timesteps))
- if type(temperature) == float:
- temperature = [temperature] * timesteps
-
- for i in iterator:
- ts = torch.full((b,), i, device=self.device, dtype=torch.long)
- if self.shorten_cond_schedule:
- assert self.model.conditioning_key != 'hybrid'
- tc = self.cond_ids[ts].to(cond.device)
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
- img, x0_partial = self.p_sample(img, cond, ts,
- clip_denoised=self.clip_denoised,
- quantize_denoised=quantize_denoised, return_x0=True,
- temperature=temperature[i], noise_dropout=noise_dropout,
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
- if mask is not None:
- assert x0 is not None
- img_orig = self.q_sample(x0, ts)
- img = img_orig * mask + (1. - mask) * img
-
- if i % log_every_t == 0 or i == timesteps - 1:
- intermediates.append(x0_partial)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
- return img, intermediates
-
- @torch.no_grad()
- def p_sample_loop(self, cond, shape, return_intermediates=False,
- x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, img_callback=None, start_T=None,
- log_every_t=None):
-
- if not log_every_t:
- log_every_t = self.log_every_t
- device = self.betas.device
- b = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=device)
- else:
- img = x_T
-
- intermediates = [img]
- if timesteps is None:
- timesteps = self.num_timesteps
-
- if start_T is not None:
- timesteps = min(timesteps, start_T)
- iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
- range(0, timesteps))
-
- if mask is not None:
- assert x0 is not None
- assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
-
- for i in iterator:
- ts = torch.full((b,), i, device=device, dtype=torch.long)
- if self.shorten_cond_schedule:
- assert self.model.conditioning_key != 'hybrid'
- tc = self.cond_ids[ts].to(cond.device)
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
- img = self.p_sample(img, cond, ts,
- clip_denoised=self.clip_denoised,
- quantize_denoised=quantize_denoised)
- if mask is not None:
- img_orig = self.q_sample(x0, ts)
- img = img_orig * mask + (1. - mask) * img
-
- if i % log_every_t == 0 or i == timesteps - 1:
- intermediates.append(img)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
-
- if return_intermediates:
- return img, intermediates
- return img
-
- @torch.no_grad()
- def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
- verbose=True, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, shape=None,**kwargs):
- if shape is None:
- shape = (batch_size, self.channels, self.image_size, self.image_size)
- if cond is not None:
- if isinstance(cond, dict):
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
- else:
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
- return self.p_sample_loop(cond,
- shape,
- return_intermediates=return_intermediates, x_T=x_T,
- verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
- mask=mask, x0=x0)
-
- @torch.no_grad()
- def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
-
- if ddim:
- ddim_sampler = DDIMSampler(self)
- shape = (self.channels, self.image_size, self.image_size)
- samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
- shape,cond,verbose=False,**kwargs)
-
- else:
- samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
- return_intermediates=True,**kwargs)
-
- return samples, intermediates
-
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
- plot_diffusion_rows=True, **kwargs):
-
- use_ddim = ddim_steps is not None
-
- log = dict()
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
- return_first_stage_outputs=True,
- force_c_encode=True,
- return_original_cond=True,
- bs=N)
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- log["inputs"] = x
- log["reconstruction"] = xrec
- if self.model.conditioning_key is not None:
- if hasattr(self.cond_stage_model, "decode"):
- xc = self.cond_stage_model.decode(c)
- log["conditioning"] = xc
- elif self.cond_stage_key in ["caption"]:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
- log["conditioning"] = xc
- elif self.cond_stage_key == 'class_label':
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
- log['conditioning'] = xc
- elif isimage(xc):
- log["conditioning"] = xc
- if ismap(xc):
- log["original_conditioning"] = self.to_rgb(xc)
-
- if plot_diffusion_rows:
- # get diffusion row
- diffusion_row = list()
- z_start = z[:n_row]
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(z_start)
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
- diffusion_row.append(self.decode_first_stage(z_noisy))
-
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
- log["diffusion_row"] = diffusion_grid
-
- if sample:
- # get denoise row
- with self.ema_scope("Plotting"):
- samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
- ddim_steps=ddim_steps,eta=ddim_eta)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
- x_samples = self.decode_first_stage(samples)
- log["samples"] = x_samples
- if plot_denoise_rows:
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
- log["denoise_row"] = denoise_grid
-
- if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
- self.first_stage_model, IdentityFirstStage):
- # also display when quantizing x0 while sampling
- with self.ema_scope("Plotting Quantized Denoised"):
- samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
- ddim_steps=ddim_steps,eta=ddim_eta,
- quantize_denoised=True)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
- # quantize_denoised=True)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_x0_quantized"] = x_samples
-
- if inpaint:
- # make a simple center square
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
- mask = torch.ones(N, h, w).to(self.device)
- # zeros will be filled in
- mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
- mask = mask[:, None, ...]
- with self.ema_scope("Plotting Inpaint"):
-
- samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_inpainting"] = x_samples
- log["mask"] = mask
-
- # outpaint
- with self.ema_scope("Plotting Outpaint"):
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_outpainting"] = x_samples
-
- if plot_progressive_rows:
- with self.ema_scope("Plotting Progressives"):
- img, progressives = self.progressive_denoising(c,
- shape=(self.channels, self.image_size, self.image_size),
- batch_size=N)
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
- log["progressive_row"] = prog_row
-
- if return_keys:
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
- return log
- else:
- return {key: log[key] for key in return_keys}
- return log
-
- def configure_optimizers(self):
- lr = self.learning_rate
- params = list(self.model.parameters())
- if self.cond_stage_trainable:
- print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
- params = params + list(self.cond_stage_model.parameters())
- if self.learn_logvar:
- print('Diffusion model optimizing logvar')
- params.append(self.logvar)
- opt = torch.optim.AdamW(params, lr=lr)
- if self.use_scheduler:
- assert 'target' in self.scheduler_config
- scheduler = instantiate_from_config(self.scheduler_config)
-
- print("Setting up LambdaLR scheduler...")
- scheduler = [
- {
- 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- }]
- return [opt], scheduler
- return opt
-
- @torch.no_grad()
- def to_rgb(self, x):
- x = x.float()
- if not hasattr(self, "colorize"):
- self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
- x = nn.functional.conv2d(x, weight=self.colorize)
- x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
- return x
-
-
-class DiffusionWrapper(pl.LightningModule):
- def __init__(self, diff_model_config, conditioning_key):
- super().__init__()
- self.diffusion_model = instantiate_from_config(diff_model_config)
- self.conditioning_key = conditioning_key
- assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
-
- def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
- if self.conditioning_key is None:
- out = self.diffusion_model(x, t)
- elif self.conditioning_key == 'concat':
- xc = torch.cat([x] + c_concat, dim=1)
- out = self.diffusion_model(xc, t)
- elif self.conditioning_key == 'crossattn':
- cc = torch.cat(c_crossattn, 1)
- out = self.diffusion_model(x, t, context=cc)
- elif self.conditioning_key == 'hybrid':
- xc = torch.cat([x] + c_concat, dim=1)
- cc = torch.cat(c_crossattn, 1)
- out = self.diffusion_model(xc, t, context=cc)
- elif self.conditioning_key == 'adm':
- cc = c_crossattn[0]
- out = self.diffusion_model(x, t, y=cc)
- else:
- raise NotImplementedError()
-
- return out
-
-
-class Layout2ImgDiffusion(LatentDiffusion):
- # TODO: move all layout-specific hacks to this class
- def __init__(self, cond_stage_key, *args, **kwargs):
- assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
- super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
-
- def log_images(self, batch, N=8, *args, **kwargs):
- logs = super().log_images(batch=batch, N=N, *args, **kwargs)
-
- key = 'train' if self.training else 'validation'
- dset = self.trainer.datamodule.datasets[key]
- mapper = dset.conditional_builders[self.cond_stage_key]
-
- bbox_imgs = []
- map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
- for tknzd_bbox in batch[self.cond_stage_key][:N]:
- bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
- bbox_imgs.append(bboximg)
-
- cond_img = torch.stack(bbox_imgs, dim=0)
- logs['bbox_image'] = cond_img
- return logs
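The deleted `p_sample` / `p_sample_loop` methods above implement standard DDPM ancestral sampling: walk t from T-1 down to 0, predict x0 from the eps estimate, form the posterior q(x_{t-1} | x_t, x0), and add noise except at the final step (optionally re-blending known latents via a mask for inpainting). Below is a minimal, self-contained sketch of that loop; `toy_model` is a stand-in for the real conditioned UNet (`apply_model`), and the schedule constants are illustrative, not this repository's configuration.

```python
# Minimal sketch of DDPM ancestral sampling, mirroring p_sample_loop above.
import torch

T = 100
betas = torch.linspace(1e-4, 2e-2, T)
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])

def toy_model(x, t):
    # stand-in for self.apply_model(x, t, c): returns a noise estimate
    return torch.zeros_like(x)

@torch.no_grad()
def sample(shape):
    x = torch.randn(shape)                                # start from x_T ~ N(0, I)
    for i in reversed(range(T)):
        t = torch.full((shape[0],), i, dtype=torch.long)
        eps = toy_model(x, t)
        # predict x0 from the noise estimate (parameterization == "eps")
        a_t = alphas_cumprod[t].view(-1, 1, 1, 1)
        x0 = ((x - (1 - a_t).sqrt() * eps) / a_t.sqrt()).clamp(-1.0, 1.0)
        # posterior q(x_{t-1} | x_t, x0): mean and variance
        a_prev = alphas_cumprod_prev[t].view(-1, 1, 1, 1)
        beta_t = betas[t].view(-1, 1, 1, 1)
        mean = ((beta_t * a_prev.sqrt() / (1 - a_t)) * x0
                + ((1 - a_prev) * alphas[t].view(-1, 1, 1, 1).sqrt() / (1 - a_t)) * x)
        var = beta_t * (1 - a_prev) / (1 - a_t)
        noise = torch.randn_like(x) if i > 0 else torch.zeros_like(x)  # no noise at t == 0
        x = mean + var.sqrt() * noise
    return x

samples = sample((2, 3, 8, 8))
```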
diff --git a/spaces/ModIA/FrenchDroneKeyword/gradio_utils.py b/spaces/ModIA/FrenchDroneKeyword/gradio_utils.py
deleted file mode 100644
index 25361fbe412c34145363fd538e2260c4a149ff45..0000000000000000000000000000000000000000
--- a/spaces/ModIA/FrenchDroneKeyword/gradio_utils.py
+++ /dev/null
@@ -1,43 +0,0 @@
-
-from typing import Callable, Optional
-
-import numpy as np
-
-import librosa
-
-import gradio as gr
-from datetime import datetime
-
-
-def predict_gradio(data,
- uniform_lambda,
- sklearn_model,
- label_transform,
- target_sr: int = 22_050):
- if data is None:
- return
-
- classes = sklearn_model.classes_
- if label_transform is not None:
- classes = label_transform.inverse_transform(classes)
-
-
- y, sr = data[1], data[0]
- y_original_signal = load_as_librosa(y, sr, target_sr=target_sr)
- y_uniform = uniform_lambda(y_original_signal, target_sr).astype(np.float32)
- prediction = sklearn_model.predict_proba(y_uniform.reshape(1, -1))
- result = {str(label): float(confidence) for (
- label, confidence) in zip(classes, prediction.flatten())}
- print(f"{datetime.now()}")
- return result
-
-def load_as_librosa(y: np.ndarray, sr: int, target_sr: int = 22050) -> np.ndarray:
- data_dtype = y.dtype
- dtype_min = np.iinfo(data_dtype).min
- dtype_max = np.iinfo(data_dtype).max
- dtype_range = np.abs(dtype_max-dtype_min)
- y_normalize = (y.astype(np.float32)-dtype_min)/dtype_range
- y_normalize_resample = librosa.resample(y=y_normalize,
- orig_sr=sr,
- target_sr=target_sr)
- return y_normalize_resample
\ No newline at end of file
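`predict_gradio` above receives Gradio's numpy audio input as a `(sample_rate, samples)` pair, and `load_as_librosa` rescales the integer sample range onto `[0, 1]` floats before resampling with librosa. A hedged usage sketch, assuming librosa is installed and the helper above is importable; the 440 Hz int16 test tone is illustrative only:

```python
import numpy as np

sr = 16_000
t = np.linspace(0, 1, sr, endpoint=False)
tone = (0.5 * np.iinfo(np.int16).max * np.sin(2 * np.pi * 440 * t)).astype(np.int16)

# the int16 range [-32768, 32767] is mapped onto [0, 1] before resampling to 22.05 kHz
y = load_as_librosa(tone, sr, target_sr=22_050)
print(y.shape, float(y.min()), float(y.max()))
```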
diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/kie/sdmgr/_base_sdmgr_unet16.py b/spaces/Mountchicken/MAERec-Gradio/configs/kie/sdmgr/_base_sdmgr_unet16.py
deleted file mode 100644
index 76aa631bdfbbf29013d27ac76c0e160d232d1500..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/configs/kie/sdmgr/_base_sdmgr_unet16.py
+++ /dev/null
@@ -1,28 +0,0 @@
-_base_ = '_base_sdmgr_novisual.py'
-
-model = dict(
- backbone=dict(type='UNet', base_channels=16),
- roi_extractor=dict(
- type='mmdet.SingleRoIExtractor',
- roi_layer=dict(type='RoIAlign', output_size=7),
- featmap_strides=[1]),
- data_preprocessor=dict(
- type='ImgDataPreprocessor',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- bgr_to_rgb=True,
- pad_size_divisor=32),
-)
-
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadKIEAnnotations'),
- dict(type='Resize', scale=(1024, 512), keep_ratio=True),
- dict(type='PackKIEInputs')
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadKIEAnnotations'),
- dict(type='Resize', scale=(1024, 512), keep_ratio=True),
- dict(type='PackKIEInputs', meta_keys=('img_path', )),
-]
diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50_fpnc_1200e_icdar2015.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50_fpnc_1200e_icdar2015.py
deleted file mode 100644
index 0e2f2789c953238b04b3d42a6da1a8c5887b13d7..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50_fpnc_1200e_icdar2015.py
+++ /dev/null
@@ -1,24 +0,0 @@
-_base_ = [
- 'dbnetpp_resnet50-dcnv2_fpnc_1200e_icdar2015.py',
-]
-
-load_from = None
-
-_base_.model.backbone = dict(
- type='mmdet.ResNet',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=-1,
- norm_cfg=dict(type='BN', requires_grad=True),
- norm_eval=True,
- style='pytorch',
- init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'))
-
-_base_.train_dataloader.num_workers = 24
-_base_.optim_wrapper.optimizer.lr = 0.003
-
-param_scheduler = [
- dict(type='LinearLR', end=200, start_factor=0.001),
- dict(type='PolyLR', power=0.9, eta_min=1e-7, begin=200, end=1200),
-]
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/transforms/loading.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/transforms/loading.py
deleted file mode 100644
index e9a3af8189edb4159a4676c6401a0364981bc4d7..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/transforms/loading.py
+++ /dev/null
@@ -1,572 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-import warnings
-from typing import Optional, Union
-
-import mmcv
-import mmengine.fileio as fileio
-import numpy as np
-from mmcv.transforms import BaseTransform
-from mmcv.transforms import LoadAnnotations as MMCV_LoadAnnotations
-from mmcv.transforms import LoadImageFromFile as MMCV_LoadImageFromFile
-
-from mmocr.registry import TRANSFORMS
-
-
-@TRANSFORMS.register_module()
-class LoadImageFromFile(MMCV_LoadImageFromFile):
- """Load an image from file.
-
- Required Keys:
-
- - img_path
-
- Modified Keys:
-
- - img
- - img_shape
- - ori_shape
-
- Args:
- to_float32 (bool): Whether to convert the loaded image to a float32
- numpy array. If set to False, the loaded image is an uint8 array.
- Defaults to False.
- color_type (str): The flag argument for :func:``mmcv.imfrombytes``.
- Defaults to 'color'.
- imdecode_backend (str): The image decoding backend type. The backend
- argument for :func:``mmcv.imfrombytes``.
- See :func:``mmcv.imfrombytes`` for details.
- Defaults to 'cv2'.
- file_client_args (dict): Arguments to instantiate a FileClient.
- See :class:`mmengine.fileio.FileClient` for details.
- Defaults to None. It will be deprecated in future. Please use
- ``backend_args`` instead.
- Deprecated in version 1.0.0rc6.
- backend_args (dict, optional): Instantiates the corresponding file
- backend. It may contain `backend` key to specify the file
- backend. If it contains, the file backend corresponding to this
- value will be used and initialized with the remaining values,
- otherwise the corresponding file backend will be selected
- based on the prefix of the file path. Defaults to None.
- New in version 1.0.0rc6.
- ignore_empty (bool): Whether to allow loading empty image or file path
- not existent. Defaults to False.
- min_size (int): The minimum size of the image to be loaded. If the
- image is smaller than the minimum size, it will be regarded as a
- broken image. Defaults to 0.
- """
-
- def __init__(
- self,
- to_float32: bool = False,
- color_type: str = 'color',
- imdecode_backend: str = 'cv2',
- file_client_args: Optional[dict] = None,
- min_size: int = 0,
- ignore_empty: bool = False,
- *,
- backend_args: Optional[dict] = None,
- ) -> None:
- self.ignore_empty = ignore_empty
- self.to_float32 = to_float32
- self.color_type = color_type
- self.imdecode_backend = imdecode_backend
- self.min_size = min_size
- self.file_client_args = file_client_args
- self.backend_args = backend_args
- if file_client_args is not None:
- warnings.warn(
- '"file_client_args" will be deprecated in future. '
- 'Please use "backend_args" instead', DeprecationWarning)
- if backend_args is not None:
- raise ValueError(
- '"file_client_args" and "backend_args" cannot be set '
- 'at the same time.')
-
- self.file_client_args = file_client_args.copy()
- if backend_args is not None:
- self.backend_args = backend_args.copy()
-
- def transform(self, results: dict) -> Optional[dict]:
- """Functions to load image.
-
- Args:
- results (dict): Result dict from :obj:``mmcv.BaseDataset``.
-
- Returns:
- dict: The dict contains loaded image and meta information.
- """
-
- filename = results['img_path']
- try:
- if getattr(self, 'file_client_args', None) is not None:
- file_client = fileio.FileClient.infer_client(
- self.file_client_args, filename)
- img_bytes = file_client.get(filename)
- else:
- img_bytes = fileio.get(
- filename, backend_args=self.backend_args)
- img = mmcv.imfrombytes(
- img_bytes, flag=self.color_type, backend=self.imdecode_backend)
- except Exception as e:
- if self.ignore_empty:
- warnings.warn(f'Failed to load {filename} due to {e}')
- return None
- else:
- raise e
- if img is None or min(img.shape[:2]) < self.min_size:
- if self.ignore_empty:
- warnings.warn(f'Ignore broken image: {filename}')
- return None
- raise IOError(f'{filename} is broken')
-
- if self.to_float32:
- img = img.astype(np.float32)
-
- results['img'] = img
- results['img_shape'] = img.shape[:2]
- results['ori_shape'] = img.shape[:2]
- return results
-
- def __repr__(self):
- repr_str = (f'{self.__class__.__name__}('
- f'ignore_empty={self.ignore_empty}, '
- f'min_size={self.min_size}, '
- f'to_float32={self.to_float32}, '
- f"color_type='{self.color_type}', "
- f"imdecode_backend='{self.imdecode_backend}', ")
-
- if self.file_client_args is not None:
- repr_str += f'file_client_args={self.file_client_args})'
- else:
- repr_str += f'backend_args={self.backend_args})'
- return repr_str
-
-
-@TRANSFORMS.register_module()
-class LoadImageFromNDArray(LoadImageFromFile):
- """Load an image from ``results['img']``.
-
- Similar with :obj:`LoadImageFromFile`, but the image has been loaded as
- :obj:`np.ndarray` in ``results['img']``. Can be used when loading image
- from webcam.
-
- Required Keys:
-
- - img
-
- Modified Keys:
-
- - img
- - img_path
- - img_shape
- - ori_shape
-
- Args:
- to_float32 (bool): Whether to convert the loaded image to a float32
- numpy array. If set to False, the loaded image is an uint8 array.
- Defaults to False.
- """
-
- def transform(self, results: dict) -> dict:
- """Transform function to add image meta information.
-
- Args:
- results (dict): Result dict with Webcam read image in
- ``results['img']``.
-
- Returns:
- dict: The dict contains loaded image and meta information.
- """
-
- img = results['img']
- if self.to_float32:
- img = img.astype(np.float32)
- if self.color_type == 'grayscale':
- img = mmcv.image.rgb2gray(img)
- results['img'] = img
- if results.get('img_path', None) is None:
- results['img_path'] = None
- results['img_shape'] = img.shape[:2]
- results['ori_shape'] = img.shape[:2]
- return results
-
-
-@TRANSFORMS.register_module()
-class InferencerLoader(BaseTransform):
- """Load the image in Inferencer's pipeline.
-
- Modified Keys:
-
- - img
- - img_path
- - img_shape
- - ori_shape
-
- Args:
- to_float32 (bool): Whether to convert the loaded image to a float32
- numpy array. If set to False, the loaded image is an uint8 array.
- Defaults to False.
- """
-
- def __init__(self, **kwargs) -> None:
- super().__init__()
- self.from_file = TRANSFORMS.build(
- dict(type='LoadImageFromFile', **kwargs))
- self.from_ndarray = TRANSFORMS.build(
- dict(type='LoadImageFromNDArray', **kwargs))
-
- def transform(self, single_input: Union[str, np.ndarray, dict]) -> dict:
- """Transform function to add image meta information.
-
- Args:
- single_input (str or dict or np.ndarray): The raw input from
- inferencer.
-
- Returns:
- dict: The dict contains loaded image and meta information.
- """
- if isinstance(single_input, str):
- inputs = dict(img_path=single_input)
- elif isinstance(single_input, np.ndarray):
- inputs = dict(img=single_input)
- elif isinstance(single_input, dict):
- inputs = single_input
- else:
- raise NotImplementedError
-
- if 'img' in inputs:
- return self.from_ndarray(inputs)
-
- return self.from_file(inputs)
-
-
-@TRANSFORMS.register_module()
-class LoadOCRAnnotations(MMCV_LoadAnnotations):
- """Load and process the ``instances`` annotation provided by dataset.
-
- The annotation format is as the following:
-
- .. code-block:: python
-
- {
- 'instances':
- [
- {
- # List of 4 numbers representing the bounding box of the
- # instance, in (x1, y1, x2, y2) order.
- # used in text detection or text spotting tasks.
- 'bbox': [x1, y1, x2, y2],
-
- # Label of instance, usually it's 0.
- # used in text detection or text spotting tasks.
- 'bbox_label': 0,
-
- # List of n numbers representing the polygon of the
- # instance, in (xn, yn) order.
- # used in text detection/ textspotter.
- "polygon": [x1, y1, x2, y2, ... xn, yn],
-
- # The flag indicating whether the instance should be ignored.
- # used in text detection or text spotting tasks.
- "ignore": False,
-
- # The groundtruth of text.
- # used in text recognition or text spotting tasks.
- "text": 'tmp',
- }
- ]
- }
-
- After this module, the annotation has been changed to the format below:
-
- .. code-block:: python
-
- {
- # In (x1, y1, x2, y2) order, float type. N is the number of bboxes
- # in np.float32
- 'gt_bboxes': np.ndarray(N, 4)
- # In np.int64 type.
- 'gt_bboxes_labels': np.ndarray(N, )
- # In (x1, y1,..., xk, yk) order, float type.
- # in list[np.float32]
- 'gt_polygons': list[np.ndarray(2k, )]
- # In np.bool_ type.
- 'gt_ignored': np.ndarray(N, )
- # In list[str]
- 'gt_texts': list[str]
- }
-
- Required Keys:
-
- - instances
-
- - bbox (optional)
- - bbox_label (optional)
- - polygon (optional)
- - ignore (optional)
- - text (optional)
-
- Added Keys:
-
- - gt_bboxes (np.float32)
- - gt_bboxes_labels (np.int64)
- - gt_polygons (list[np.float32])
- - gt_ignored (np.bool_)
- - gt_texts (list[str])
-
- Args:
- with_bbox (bool): Whether to parse and load the bbox annotation.
- Defaults to False.
- with_label (bool): Whether to parse and load the label annotation.
- Defaults to False.
- with_polygon (bool): Whether to parse and load the polygon annotation.
- Defaults to False.
- with_text (bool): Whether to parse and load the text annotation.
- Defaults to False.
- """
-
- def __init__(self,
- with_bbox: bool = False,
- with_label: bool = False,
- with_polygon: bool = False,
- with_text: bool = False,
- **kwargs) -> None:
- super().__init__(with_bbox=with_bbox, with_label=with_label, **kwargs)
- self.with_polygon = with_polygon
- self.with_text = with_text
- self.with_ignore = with_bbox or with_polygon
-
- def _load_ignore_flags(self, results: dict) -> None:
- """Private function to load ignore annotations.
-
- Args:
- results (dict): Result dict from :obj:``OCRDataset``.
-
- Returns:
- dict: The dict contains loaded ignore annotations.
- """
- gt_ignored = []
- for instance in results['instances']:
- gt_ignored.append(instance['ignore'])
- results['gt_ignored'] = np.array(gt_ignored, dtype=np.bool_)
-
- def _load_polygons(self, results: dict) -> None:
- """Private function to load polygon annotations.
-
- Args:
- results (dict): Result dict from :obj:``OCRDataset``.
-
- Returns:
- dict: The dict contains loaded polygon annotations.
- """
-
- gt_polygons = []
- for instance in results['instances']:
- gt_polygons.append(np.array(instance['polygon'], dtype=np.float32))
- results['gt_polygons'] = gt_polygons
-
- def _load_texts(self, results: dict) -> None:
- """Private function to load text annotations.
-
- Args:
- results (dict): Result dict from :obj:``OCRDataset``.
-
- Returns:
- dict: The dict contains loaded text annotations.
- """
- gt_texts = []
- for instance in results['instances']:
- gt_texts.append(instance['text'])
- results['gt_texts'] = gt_texts
-
- def transform(self, results: dict) -> dict:
- """Function to load multiple types annotations.
-
- Args:
- results (dict): Result dict from :obj:``OCRDataset``.
-
- Returns:
- dict: The dict contains loaded bounding box, label polygon and
- text annotations.
- """
- results = super().transform(results)
- if self.with_polygon:
- self._load_polygons(results)
- if self.with_text:
- self._load_texts(results)
- if self.with_ignore:
- self._load_ignore_flags(results)
- return results
-
- def __repr__(self) -> str:
- repr_str = self.__class__.__name__
- repr_str += f'(with_bbox={self.with_bbox}, '
- repr_str += f'with_label={self.with_label}, '
- repr_str += f'with_polygon={self.with_polygon}, '
- repr_str += f'with_text={self.with_text}, '
- repr_str += f"imdecode_backend='{self.imdecode_backend}', "
-
- if self.file_client_args is not None:
- repr_str += f'file_client_args={self.file_client_args})'
- else:
- repr_str += f'backend_args={self.backend_args})'
- return repr_str
-
-
-@TRANSFORMS.register_module()
-class LoadKIEAnnotations(MMCV_LoadAnnotations):
- """Load and process the ``instances`` annotation provided by dataset.
-
- The annotation format is as the following:
-
- .. code-block:: python
-
- {
- # A nested list of 4 numbers representing the bounding box of the
- # instance, in (x1, y1, x2, y2) order.
- 'bbox': np.array([[x1, y1, x2, y2], [x1, y1, x2, y2], ...],
- dtype=np.int32),
-
- # Labels of boxes. Shape is (N,).
- 'bbox_labels': np.array([0, 2, ...], dtype=np.int32),
-
- # Labels of edges. Shape (N, N).
- 'edge_labels': np.array([0, 2, ...], dtype=np.int32),
-
- # List of texts.
- "texts": ['text1', 'text2', ...],
- }
-
- After this module, the annotation has been changed to the format below:
-
- .. code-block:: python
-
- {
- # In (x1, y1, x2, y2) order, float type. N is the number of bboxes
- # in np.float32
- 'gt_bboxes': np.ndarray(N, 4),
- # In np.int64 type.
- 'gt_bboxes_labels': np.ndarray(N, ),
- # In np.int32 type.
- 'gt_edges_labels': np.ndarray(N, N),
- # In list[str]
- 'gt_texts': list[str],
- # tuple(int)
- 'ori_shape': (H, W)
- }
-
- Required Keys:
-
- - bboxes
- - bbox_labels
- - edge_labels
- - texts
-
- Added Keys:
-
- - gt_bboxes (np.float32)
- - gt_bboxes_labels (np.int64)
- - gt_edges_labels (np.int64)
- - gt_texts (list[str])
- - ori_shape (tuple[int])
-
- Args:
- with_bbox (bool): Whether to parse and load the bbox annotation.
- Defaults to True.
- with_label (bool): Whether to parse and load the label annotation.
- Defaults to True.
- with_text (bool): Whether to parse and load the text annotation.
- Defaults to True.
- directed (bool): Whether build edges as a directed graph.
- Defaults to False.
- key_node_idx (int, optional): Key node label, used to mask out edges
- that are not connected from key nodes to value nodes. It has to be
- specified together with ``value_node_idx``. Defaults to None.
- value_node_idx (int, optional): Value node label, used to mask out
- edges that are not connected from key nodes to value nodes. It has
- to be specified together with ``key_node_idx``. Defaults to None.
- """
-
- def __init__(self,
- with_bbox: bool = True,
- with_label: bool = True,
- with_text: bool = True,
- directed: bool = False,
- key_node_idx: Optional[int] = None,
- value_node_idx: Optional[int] = None,
- **kwargs) -> None:
- super().__init__(with_bbox=with_bbox, with_label=with_label, **kwargs)
- self.with_text = with_text
- self.directed = directed
- if key_node_idx is not None or value_node_idx is not None:
- assert key_node_idx is not None and value_node_idx is not None
- self.key_node_idx = key_node_idx
- self.value_node_idx = value_node_idx
-
- def _load_texts(self, results: dict) -> None:
- """Private function to load text annotations.
-
- Args:
- results (dict): Result dict from :obj:``OCRDataset``.
- """
- gt_texts = []
- for instance in results['instances']:
- gt_texts.append(instance['text'])
- results['gt_texts'] = gt_texts
-
- def _load_labels(self, results: dict) -> None:
- """Private function to load label annotations.
-
- Args:
- results (dict): Result dict from :obj:``WildReceiptDataset``.
- """
- bbox_labels = []
- edge_labels = []
- for instance in results['instances']:
- bbox_labels.append(instance['bbox_label'])
- edge_labels.append(instance['edge_label'])
-
- bbox_labels = np.array(bbox_labels, np.int32)
- edge_labels = np.array(edge_labels)
- edge_labels = (edge_labels[:, None] == edge_labels[None, :]).astype(
- np.int32)
-
- if self.directed:
- edge_labels = (edge_labels & bbox_labels == 1).astype(np.int32)
-
- if hasattr(self, 'key_node_idx'):
- key_nodes_mask = bbox_labels == self.key_node_idx
- value_nodes_mask = bbox_labels == self.value_node_idx
- key2value_mask = key_nodes_mask[:,
- None] * value_nodes_mask[None, :]
- edge_labels[~key2value_mask] = -1
-
- np.fill_diagonal(edge_labels, -1)
-
- results['gt_edges_labels'] = edge_labels.astype(np.int64)
- results['gt_bboxes_labels'] = bbox_labels.astype(np.int64)
-
- def transform(self, results: dict) -> dict:
- """Function to load multiple types annotations.
-
- Args:
- results (dict): Result dict from :obj:``OCRDataset``.
-
- Returns:
- dict: The dict contains loaded bounding box, label polygon and
- text annotations.
- """
- if 'ori_shape' not in results:
- results['ori_shape'] = copy.deepcopy(results['img_shape'])
- results = super().transform(results)
- if self.with_text:
- self._load_texts(results)
- return results
-
- def __repr__(self) -> str:
- repr_str = self.__class__.__name__
- repr_str += f'(with_bbox={self.with_bbox}, '
- repr_str += f'with_label={self.with_label}, '
- repr_str += f'with_text={self.with_text})'
- return repr_str
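A hedged usage sketch of the deleted `LoadOCRAnnotations` transform above, feeding it a single toy instance and printing the `gt_*` keys its docstring describes. The import path is assumed from the file location; running it requires mmocr, mmcv, and mmengine to be installed.

```python
from mmocr.datasets.transforms import LoadOCRAnnotations  # assumed import path

results = dict(instances=[dict(
    bbox=[0.0, 0.0, 10.0, 10.0],
    bbox_label=0,
    polygon=[0.0, 0.0, 10.0, 0.0, 10.0, 10.0, 0.0, 10.0],
    ignore=False,
    text='hello',
)])

transform = LoadOCRAnnotations(with_bbox=True, with_label=True,
                               with_polygon=True, with_text=True)
out = transform(results)
print(out['gt_bboxes'].shape)    # (1, 4), float32
print(out['gt_bboxes_labels'])   # [0], int64
print(out['gt_ignored'])         # [False]
print(out['gt_texts'])           # ['hello']
```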
diff --git a/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textrecog/cocotext_converter.py b/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textrecog/cocotext_converter.py
deleted file mode 100644
index 413c09b6c32c7f31ec86fe46c42d69809986bbf9..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textrecog/cocotext_converter.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import argparse
-import math
-import os.path as osp
-from functools import partial
-
-import mmcv
-import mmengine
-
-from mmocr.utils import dump_ocr_data
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='Generate training and validation set of COCO Text v2 ')
- parser.add_argument('root_path', help='Root dir path of COCO Text v2')
- parser.add_argument(
- '--nproc', default=1, type=int, help='Number of processes')
- parser.add_argument(
- '--preserve-vertical',
- help='Preserve samples containing vertical texts',
- action='store_true')
- args = parser.parse_args()
- return args
-
-
-def process_img(args, src_image_root, dst_image_root, ignore_image_root,
- preserve_vertical, split):
- # Dirty hack for multi-processing
- img_idx, img_info, anns = args
- src_img = mmcv.imread(osp.join(src_image_root, img_info['file_name']))
- label = []
- for ann_idx, ann in enumerate(anns):
- text_label = ann['utf8_string']
-
- # Ignore illegible or non-English words
- if ann['language'] == 'not english':
- continue
- if ann['legibility'] == 'illegible':
- continue
-
- x, y, w, h = ann['bbox']
- x, y = max(0, math.floor(x)), max(0, math.floor(y))
- w, h = math.ceil(w), math.ceil(h)
- dst_img = src_img[y:y + h, x:x + w]
- dst_img_name = f'img_{img_idx}_{ann_idx}.jpg'
-
- if not preserve_vertical and h / w > 2 and split == 'train':
- dst_img_path = osp.join(ignore_image_root, dst_img_name)
- mmcv.imwrite(dst_img, dst_img_path)
- continue
-
- dst_img_path = osp.join(dst_image_root, dst_img_name)
- mmcv.imwrite(dst_img, dst_img_path)
-
- label.append({
- 'file_name': dst_img_name,
- 'anno_info': [{
- 'text': text_label
- }]
- })
-
- return label
-
-
-def convert_cocotext(root_path,
- split,
- preserve_vertical,
- nproc,
- img_start_idx=0):
- """Collect the annotation information and crop the images.
-
- The annotation format is as the following:
- {
- 'anns':{
- '45346':{
- 'mask': [468.9,286.7,468.9,295.2,493.0,295.8,493.0,287.2],
- 'class': 'machine printed',
- 'bbox': [468.9, 286.7, 24.1, 9.1], # x, y, w, h
- 'image_id': 217925,
- 'id': 45346,
- 'language': 'english', # 'english' or 'not english'
- 'area': 206.06,
- 'utf8_string': 'New',
- 'legibility': 'legible', # 'legible' or 'illegible'
- },
- ...
- }
- 'imgs':{
- '540965':{
- 'id': 540965,
- 'set': 'train', # 'train' or 'val'
- 'width': 640,
- 'height': 360,
- 'file_name': 'COCO_train2014_000000540965.jpg'
- },
- ...
- }
- 'imgToAnns':{
- '540965': [],
- '260932': [63993, 63994, 63995, 63996, 63997, 63998, 63999],
- ...
- }
- }
-
- Args:
- root_path (str): Root path to the dataset
- split (str): Dataset split, which should be 'train' or 'val'
- preserve_vertical (bool): Whether to preserve vertical texts
- nproc (int): Number of processes
- img_start_idx (int): Index of start image
-
- Returns:
- img_info (dict): The dict of the img and annotation information
- """
-
- annotation_path = osp.join(root_path, 'annotations/cocotext.v2.json')
- if not osp.exists(annotation_path):
- raise Exception(
- f'{annotation_path} does not exist, please check and try again.')
-
- annotation = mmengine.load(annotation_path)
- # outputs
- dst_label_file = osp.join(root_path, f'{split}_label.json')
- dst_image_root = osp.join(root_path, 'crops', split)
- ignore_image_root = osp.join(root_path, 'ignores', split)
- src_image_root = osp.join(root_path, 'imgs')
- mmengine.mkdir_or_exist(dst_image_root)
- mmengine.mkdir_or_exist(ignore_image_root)
-
- process_img_with_path = partial(
- process_img,
- src_image_root=src_image_root,
- dst_image_root=dst_image_root,
- ignore_image_root=ignore_image_root,
- preserve_vertical=preserve_vertical,
- split=split)
- tasks = []
- for img_idx, img_info in enumerate(annotation['imgs'].values()):
- if img_info['set'] == split:
- ann_ids = annotation['imgToAnns'][str(img_info['id'])]
- anns = [annotation['anns'][str(ann_id)] for ann_id in ann_ids]
- tasks.append((img_idx + img_start_idx, img_info, anns))
- labels_list = mmengine.track_parallel_progress(
- process_img_with_path, tasks, keep_order=True, nproc=nproc)
- final_labels = []
- for label_list in labels_list:
- final_labels += label_list
- dump_ocr_data(final_labels, dst_label_file, 'textrecog')
-
- return len(annotation['imgs'])
-
-
-def main():
- args = parse_args()
- root_path = args.root_path
- print('Processing training set...')
- num_train_imgs = convert_cocotext(
- root_path=root_path,
- split='train',
- preserve_vertical=args.preserve_vertical,
- nproc=args.nproc)
- print('Processing validation set...')
- convert_cocotext(
- root_path=root_path,
- split='val',
- preserve_vertical=args.preserve_vertical,
- nproc=args.nproc,
- img_start_idx=num_train_imgs)
- print('Finish')
-
-
-if __name__ == '__main__':
- main()
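The crop arithmetic in `process_img` above floors the box origin (clamped to 0) and ceils the extent before slicing the image array, and tall crops (`h / w > 2`) are diverted to the ignore directory for the training split unless `--preserve-vertical` is set. A small self-contained check using the bbox from the docstring example:

```python
import math
import numpy as np

img = np.zeros((360, 640, 3), dtype=np.uint8)   # H x W x C, like a COCO image
x, y, w, h = 468.9, 286.7, 24.1, 9.1            # bbox from the annotation example above
x, y = max(0, math.floor(x)), max(0, math.floor(y))
w, h = math.ceil(w), math.ceil(h)
crop = img[y:y + h, x:x + w]
print(crop.shape)                               # (10, 25, 3)
print(h / w > 2)                                # False -> kept for training
```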
diff --git a/spaces/NEXAS/NEXAS-stable_diff_personl/README.md b/spaces/NEXAS/NEXAS-stable_diff_personl/README.md
deleted file mode 100644
index 502a7472adce3e9d8bce0ce598acd38ecf452ae0..0000000000000000000000000000000000000000
--- a/spaces/NEXAS/NEXAS-stable_diff_personl/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: NEXAS-stable Diff Personl
-emoji: 🏢
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.41.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/sort_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/sort_dataset.py
deleted file mode 100644
index b3890e7279e1f26db2e48ec0a91c639e9299d60f..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/sort_dataset.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-
-from . import BaseWrapperDataset
-
-
-class SortDataset(BaseWrapperDataset):
- def __init__(self, dataset, sort_order):
- super().__init__(dataset)
- if not isinstance(sort_order, (list, tuple)):
- sort_order = [sort_order]
- self.sort_order = sort_order
-
- assert all(len(so) == len(dataset) for so in sort_order)
-
- def ordered_indices(self):
- return np.lexsort(self.sort_order)
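`SortDataset.ordered_indices` relies on `np.lexsort`, whose *last* key is the primary sort key and earlier keys only break ties. A quick standalone illustration (the arrays are made up):

```python
import numpy as np

lengths = np.array([3, 1, 2, 1])      # e.g. example sizes
ids = np.array([0, 1, 2, 3])          # tie-breaker
order = np.lexsort([ids, lengths])    # sort by lengths first, then by ids
print(order)                          # [1 3 2 0]
```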
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/subsample_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/subsample_dataset.py
deleted file mode 100644
index 48feaf883f87dc95f8637c24d3c96f3f9fd8bd1d..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/subsample_dataset.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-
-import numpy as np
-
-from . import BaseWrapperDataset
-
-
-logger = logging.getLogger(__name__)
-
-
-class SubsampleDataset(BaseWrapperDataset):
- """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples
-
- Args:
- dataset (~torch.utils.data.Dataset): dataset to subsample
- size_ratio (float): the ratio to subsample to. Must be between 0 and 1 (exclusive)
- """
-
- def __init__(self, dataset, size_ratio, shuffle=False):
- super().__init__(dataset)
- assert size_ratio < 1
- self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
- self.indices = np.random.choice(
- list(range(len(self.dataset))), self.actual_size, replace=False
- )
- self.shuffle = shuffle
- logger.info(
- "subsampled dataset from {} to {} (ratio={})".format(
- len(self.dataset), self.actual_size, size_ratio
- )
- )
-
- def __getitem__(self, index):
- return self.dataset[self.indices[index]]
-
- def __len__(self):
- return self.actual_size
-
- def collater(self, samples):
- return self.dataset.collater(samples)
-
- @property
- def sizes(self):
- return self.dataset.sizes[self.indices]
-
- @property
- def name(self):
- return self.dataset.name
-
- def num_tokens(self, index):
- return self.dataset.num_tokens(self.indices[index])
-
- def size(self, index):
- return self.dataset.size(self.indices[index])
-
- def ordered_indices(self):
- """Return an ordered list of indices. Batches will be constructed based
- on this order."""
- if self.shuffle:
- order = [np.random.permutation(len(self))]
- else:
- order = [np.arange(len(self))]
- order.append(self.sizes)
- return np.lexsort(order)
-
- def prefetch(self, indices):
- self.dataset.prefetch(self.indices[indices])
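Outside of fairseq, the subsampling logic above reduces to keeping a random `ceil(len(dataset) * size_ratio)` subset of indices drawn without replacement; a minimal sketch with a plain list standing in for the wrapped dataset:

```python
import numpy as np

dataset = list(range(1000))                     # stand-in for a fairseq dataset
size_ratio = 0.1
actual_size = int(np.ceil(len(dataset) * size_ratio))
indices = np.random.choice(len(dataset), actual_size, replace=False)
subsampled = [dataset[i] for i in indices]
print(len(subsampled))                          # 100
```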
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/measure_teacher_quality.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/measure_teacher_quality.py
deleted file mode 100644
index 92279b2214bb2ba4a99aea92098907ef4f55821b..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/hubert/measure_teacher_quality.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import os.path as op
-import re
-from tabulate import tabulate
-from collections import Counter
-
-
-def comp_purity(p_xy, axis):
- max_p = p_xy.max(axis=axis)
- marg_p = p_xy.sum(axis=axis)
- indv_pur = max_p / marg_p
- aggr_pur = max_p.sum()
- return indv_pur, aggr_pur
-
-
-def comp_entropy(p):
- return (-p * np.log(p + 1e-8)).sum()
-
-
-def comp_norm_mutual_info(p_xy):
- p_x = p_xy.sum(axis=1, keepdims=True)
- p_y = p_xy.sum(axis=0, keepdims=True)
- pmi = np.log(p_xy / np.matmul(p_x, p_y) + 1e-8)
- mi = (p_xy * pmi).sum()
- h_x = comp_entropy(p_x)
- h_y = comp_entropy(p_y)
- return mi, mi / h_x, mi / h_y, h_x, h_y
-
-
-def pad(labs, n):
- if n == 0:
- return np.array(labs)
- return np.concatenate([[labs[0]] * n, labs, [labs[-1]] * n])
-
-
-def comp_avg_seg_dur(labs_list):
- n_frms = 0
- n_segs = 0
- for labs in labs_list:
- labs = np.array(labs)
- edges = np.zeros(len(labs)).astype(bool)
- edges[0] = True
- edges[1:] = labs[1:] != labs[:-1]
- n_frms += len(edges)
- n_segs += edges.astype(int).sum()
- return n_frms / n_segs
-
-
-def comp_joint_prob(uid2refs, uid2hyps):
- """
- """Compute the joint label distribution p_xy between reference and
- hypothesis labels, counted frame-by-frame over utterances present in
- both dicts.
- """
- skipped = []
- abs_frmdiff = 0
- for uid in uid2refs:
- if uid not in uid2hyps:
- skipped.append(uid)
- continue
- refs = uid2refs[uid]
- hyps = uid2hyps[uid]
- abs_frmdiff += abs(len(refs) - len(hyps))
- min_len = min(len(refs), len(hyps))
- refs = refs[:min_len]
- hyps = hyps[:min_len]
- cnts.update(zip(refs, hyps))
- tot = sum(cnts.values())
-
- ref_set = sorted({ref for ref, _ in cnts.keys()})
- hyp_set = sorted({hyp for _, hyp in cnts.keys()})
- ref2pid = dict(zip(ref_set, range(len(ref_set))))
- hyp2lid = dict(zip(hyp_set, range(len(hyp_set))))
- # print(hyp_set)
- p_xy = np.zeros((len(ref2pid), len(hyp2lid)), dtype=float)
- for (ref, hyp), cnt in cnts.items():
- p_xy[ref2pid[ref], hyp2lid[hyp]] = cnt
- p_xy /= p_xy.sum()
- return p_xy, ref2pid, hyp2lid, tot, abs_frmdiff, skipped
-
-
-def read_phn(tsv_path, rm_stress=True):
- uid2phns = {}
- with open(tsv_path) as f:
- for line in f:
- uid, phns = line.rstrip().split("\t")
- phns = phns.split(",")
- if rm_stress:
- phns = [re.sub("[0-9]", "", phn) for phn in phns]
- uid2phns[uid] = phns
- return uid2phns
-
-
-def read_lab(tsv_path, lab_path, pad_len=0, upsample=1):
- """
- tsv is needed to retrieve the uids for the labels
- """
- with open(tsv_path) as f:
- f.readline()
- uids = [op.splitext(op.basename(line.rstrip().split()[0]))[0] for line in f]
- with open(lab_path) as f:
- labs_list = [pad(line.rstrip().split(), pad_len).repeat(upsample) for line in f]
- assert len(uids) == len(labs_list)
- return dict(zip(uids, labs_list))
-
-
-def main_lab_lab(
- tsv_dir,
- lab_dir,
- lab_name,
- lab_sets,
- ref_dir,
- ref_name,
- pad_len=0,
- upsample=1,
- verbose=False,
-):
- # assume tsv_dir is the same for both the reference and the hypotheses
- tsv_dir = lab_dir if tsv_dir is None else tsv_dir
-
- uid2refs = {}
- for s in lab_sets:
- uid2refs.update(read_lab(f"{tsv_dir}/{s}.tsv", f"{ref_dir}/{s}.{ref_name}"))
-
- uid2hyps = {}
- for s in lab_sets:
- uid2hyps.update(
- read_lab(
- f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample
- )
- )
- _main(uid2refs, uid2hyps, verbose)
-
-
-def main_phn_lab(
- tsv_dir,
- lab_dir,
- lab_name,
- lab_sets,
- phn_dir,
- phn_sets,
- pad_len=0,
- upsample=1,
- verbose=False,
-):
- uid2refs = {}
- for s in phn_sets:
- uid2refs.update(read_phn(f"{phn_dir}/{s}.tsv"))
-
- uid2hyps = {}
- tsv_dir = lab_dir if tsv_dir is None else tsv_dir
- for s in lab_sets:
- uid2hyps.update(
- read_lab(
- f"{tsv_dir}/{s}.tsv", f"{lab_dir}/{s}.{lab_name}", pad_len, upsample
- )
- )
- _main(uid2refs, uid2hyps, verbose)
-
-
-def _main(uid2refs, uid2hyps, verbose):
- (p_xy, ref2pid, hyp2lid, tot, frmdiff, skipped) = comp_joint_prob(
- uid2refs, uid2hyps
- )
- ref_pur_by_hyp, ref_pur = comp_purity(p_xy, axis=0)
- hyp_pur_by_ref, hyp_pur = comp_purity(p_xy, axis=1)
- (mi, mi_norm_by_ref, mi_norm_by_hyp, h_ref, h_hyp) = comp_norm_mutual_info(p_xy)
- outputs = {
- "ref pur": ref_pur,
- "hyp pur": hyp_pur,
- "H(ref)": h_ref,
- "H(hyp)": h_hyp,
- "MI": mi,
- "MI/H(ref)": mi_norm_by_ref,
- "ref segL": comp_avg_seg_dur(uid2refs.values()),
- "hyp segL": comp_avg_seg_dur(uid2hyps.values()),
- "p_xy shape": p_xy.shape,
- "frm tot": tot,
- "frm diff": frmdiff,
- "utt tot": len(uid2refs),
- "utt miss": len(skipped),
- }
- print(tabulate([outputs.values()], outputs.keys(), floatfmt=".4f"))
-
-
-if __name__ == "__main__":
- """
- compute the quality of labels with respect to phone transcripts, or to another label set if provided
- """
- import argparse
-
- parser = argparse.ArgumentParser()
- parser.add_argument("tsv_dir")
- parser.add_argument("lab_dir")
- parser.add_argument("lab_name")
- parser.add_argument("--lab_sets", default=["valid"], type=str, nargs="+")
- parser.add_argument(
- "--phn_dir",
- default="/checkpoint/wnhsu/data/librispeech/960h/fa/raw_phn/phone_frame_align_v1",
- )
- parser.add_argument(
- "--phn_sets", default=["dev-clean", "dev-other"], type=str, nargs="+"
- )
- parser.add_argument("--pad_len", default=0, type=int, help="padding for hypotheses")
- parser.add_argument(
- "--upsample", default=1, type=int, help="upsample factor for hypotheses"
- )
- parser.add_argument("--ref_lab_dir", default="")
- parser.add_argument("--ref_lab_name", default="")
- parser.add_argument("--verbose", action="store_true")
- args = parser.parse_args()
-
- if args.ref_lab_dir and args.ref_lab_name:
- main_lab_lab(
- args.tsv_dir,
- args.lab_dir,
- args.lab_name,
- args.lab_sets,
- args.ref_lab_dir,
- args.ref_lab_name,
- args.pad_len,
- args.upsample,
- args.verbose,
- )
- else:
- main_phn_lab(
- args.tsv_dir,
- args.lab_dir,
- args.lab_name,
- args.lab_sets,
- args.phn_dir,
- args.phn_sets,
- args.pad_len,
- args.upsample,
- args.verbose,
- )
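A toy sanity check of the purity and normalized-mutual-information measures above, assuming `comp_purity` and `comp_norm_mutual_info` from this script are in scope: with a perfectly diagonal joint distribution both purities are 1 and the mutual information equals the marginal entropies (≈ log 2).

```python
import numpy as np

p_xy = np.array([[0.5, 0.0],
                 [0.0, 0.5]])                   # rows: reference labels, cols: hypothesis labels

_, ref_pur = comp_purity(p_xy, axis=0)          # "ref pur" in _main
_, hyp_pur = comp_purity(p_xy, axis=1)          # "hyp pur" in _main
mi, mi_by_ref, mi_by_hyp, h_ref, h_hyp = comp_norm_mutual_info(p_xy)
print(ref_pur, hyp_pur)                         # 1.0 1.0
print(round(mi, 4), round(h_ref, 4))            # ~0.6931 ~0.6931 (log 2)
```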
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/README.md b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/README.md
deleted file mode 100644
index b155e855f2f94e30ad22262f260008fda8ac1804..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/discriminative_reranking_nmt/README.md
+++ /dev/null
@@ -1,202 +0,0 @@
-# Discriminative Reranking for Neural Machine Translation
-https://aclanthology.org/2021.acl-long.563/
-
-This folder contains source code for training DrNMT, a discriminatively trained reranker for neural machine translation.
-
-## Data preparation
-1. Follow the instructions under `examples/translation` to build a base MT model. Prepare three files, one with source sentences, one with ground truth target sentences, and one with hypotheses generated from the base MT model. Each line in the file contains one sentence in raw text (i.e. no sentencepiece, etc.). Below is an example of the files with _N_ hypotheses for each source sentence.
-
-```
-# Example of the source sentence file: (The file should contain L lines.)
-
-source_sentence_1
-source_sentence_2
-source_sentence_3
-...
-source_sentence_L
-
-# Example of the target sentence file: (The file should contain L lines.)
-
-target_sentence_1
-target_sentence_2
-target_sentence_3
-...
-target_sentence_L
-
-# Example of the hypotheses file: (The file should contain L*N lines.)
-
-source_sentence_1_hypo_1
-source_sentence_1_hypo_2
-...
-source_sentence_1_hypo_N
-source_sentence_2_hypo_1
-...
-source_sentence_2_hypo_N
-...
-source_sentence_L_hypo_1
-...
-source_sentence_L_hypo_N
-```
-
-2. Download the [XLMR model](https://github.com/fairinternal/fairseq-py/tree/main/examples/xlmr#pre-trained-models).
-```
-wget https://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz
-tar zxvf xlmr.base.tar.gz
-
-# The folder should contain dict.txt, model.pt and sentencepiece.bpe.model.
-```
-
-3. Prepare scores and BPE data.
-* `N`: Number of hypotheses per source sentence. We use 50 in the paper.
-* `SPLIT`: Name of the data split, i.e. train, valid, test. Use split_name, split_name1, split_name2, ..., if there are multiple datasets for a split, e.g. train, train1, valid, valid1.
-* `NUM_SHARDS`: Number of shards. Set this to 1 for non-train splits.
-* `METRIC`: The metric for DrNMT to optimize for. We support either `bleu` or `ter`.
-```
-# For each data split, e.g. train, valid, test, etc., run the following:
-
-SOURCE_FILE=/path/to/source_sentence_file
-TARGET_FILE=/path/to/target_sentence_file
-HYPO_FILE=/path/to/hypo_file
-XLMR_DIR=/path/to/xlmr
-OUTPUT_DIR=/path/to/output
-
-python scripts/prep_data.py \
- --input-source ${SOURCE_FILE} \
- --input-target ${TARGET_FILE} \
- --input-hypo ${HYPO_FILE} \
- --output-dir ${OUTPUT_DIR} \
- --split $SPLIT \
- --beam $N \
- --sentencepiece-model ${XLMR_DIR}/sentencepiece.bpe.model \
- --metric $METRIC \
- --num-shards ${NUM_SHARDS}
-
-# The script will create ${OUTPUT_DIR}/$METRIC with ${NUM_SHARDS} splits.
-# Under split*/input_src, split*/input_tgt and split*/$METRIC, there will be $SPLIT.bpe and $SPLIT.$METRIC files, respectively.
-
-```
-
-4. Pre-process the data into fairseq format.
-```
-# use a comma to separate them if there is more than one train or valid set
-for suffix in src tgt ; do
- fairseq-preprocess --only-source \
- --trainpref ${OUTPUT_DIR}/$METRIC/split1/input_${suffix}/train.bpe \
- --validpref ${OUTPUT_DIR}/$METRIC/split1/input_${suffix}/valid.bpe \
- --destdir ${OUTPUT_DIR}/$METRIC/split1/input_${suffix} \
- --workers 60 \
- --srcdict ${XLMR_DIR}/dict.txt
-done
-
-for i in `seq 2 ${NUM_SHARDS}`; do
- for suffix in src tgt ; do
- fairseq-preprocess --only-source \
- --trainpref ${OUTPUT_DIR}/$METRIC/split${i}/input_${suffix}/train.bpe \
- --destdir ${OUTPUT_DIR}/$METRIC/split${i}/input_${suffix} \
- --workers 60 \
- --srcdict ${XLMR_DIR}/dict.txt
-
- ln -s ${OUTPUT_DIR}/$METRIC/split1/input_${suffix}/valid* ${OUTPUT_DIR}/$METRIC/split${i}/input_${suffix}/.
- done
-
- ln -s ${OUTPUT_DIR}/$METRIC/split1/$METRIC/valid* ${OUTPUT_DIR}/$METRIC/split${i}/$METRIC/.
-done
-```
-
-## Training
-
-```
-EXP_DIR=/path/to/exp
-
-# An example of training the model with the config for De-En experiment in the paper.
-# The config uses 16 GPUs and 50 hypotheses.
-# For training with fewer GPUs, set
-# distributed_training.distributed_world_size=k +optimization.update_freq='[x]' where x = 16/k
-# For training with fewer hypotheses, set
-# task.mt_beam=N dataset.batch_size=N dataset.required_batch_size_multiple=N
-
-fairseq-hydra-train -m \
- --config-dir config/ --config-name deen \
- task.data=${OUTPUT_DIR}/$METRIC/split1/ \
- task.num_data_splits=${NUM_SHARDS} \
- model.pretrained_model=${XLMR_DIR}/model.pt \
- common.user_dir=${FAIRSEQ_ROOT}/examples/discriminative_reranking_nmt \
- checkpoint.save_dir=${EXP_DIR}
-
-```
-
-## Inference & scoring
-Perform DrNMT reranking (fw + reranker score)
-1. Tune weights on valid sets.
-```
-# generate N hypotheses with the base MT model (fw score)
-VALID_SOURCE_FILE=/path/to/source_sentences # one sentence per line, converted to the sentencepiece used by the base MT model
-VALID_TARGET_FILE=/path/to/target_sentences # one sentence per line in raw text, i.e. no sentencepiece and tokenization
-MT_MODEL=/path/to/mt_model
-MT_DATA_PATH=/path/to/mt_data
-
-cat ${VALID_SOURCE_FILE} | \
- fairseq-interactive ${MT_DATA_PATH} \
- --max-tokens 4000 --buffer-size 16 \
- --num-workers 32 --path ${MT_MODEL} \
- --beam $N --nbest $N \
- --post-process sentencepiece &> valid-hypo.out
-
-# replace "bleu" with "ter" to optimize for TER
-python drnmt_rerank.py \
- ${OUTPUT_DIR}/$METRIC/split1/ \
- --path ${EXP_DIR}/checkpoint_best.pt \
- --in-text valid-hypo.out \
- --results-path ${EXP_DIR} \
- --gen-subset valid \
- --target-text ${VALID_TARGET_FILE} \
- --user-dir ${FAIRSEQ_ROOT}/examples/discriminative_reranking_nmt \
- --bpe sentencepiece \
- --sentencepiece-model ${XLMR_DIR}/sentencepiece.bpe.model \
- --beam $N \
- --batch-size $N \
- --metric bleu \
- --tune
-
-```
-
-2. Apply best weights on test sets
-```
-# generate N hypotheses with the base MT model (fw score)
-TEST_SOURCE_FILE=/path/to/source_sentences # one sentence per line, converted to the sentencepiece used by the base MT model
-
-cat ${TEST_SOURCE_FILE} | \
- fairseq-interactive ${MT_DATA_PATH} \
- --max-tokens 4000 --buffer-size 16 \
- --num-workers 32 --path ${MT_MODEL} \
- --beam $N --nbest $N \
- --post-process sentencepiece &> test-hypo.out
-
-# replace "bleu" with "ter" to evaluate TER
-# Add --target-text to evaluate BLEU/TER;
-# otherwise the script will only generate the hypotheses with the highest scores.
-python drnmt_rerank.py \
- ${OUTPUT_DIR}/$METRIC/split1/ \
- --path ${EXP_DIR}/checkpoint_best.pt \
- --in-text test-hypo.out \
- --results-path ${EXP_DIR} \
- --gen-subset test \
- --user-dir ${FAIRSEQ_ROOT}/examples/discriminative_reranking_nmt \
- --bpe sentencepiece \
- --sentencepiece-model ${XLMR_DIR}/sentencepiece.bpe.model \
- --beam $N \
- --batch-size $N \
- --metric bleu \
- --fw-weight ${BEST_FW_WEIGHT} \
- --lenpen ${BEST_LENPEN}
-```
-
-## Citation
-```bibtex
-@inproceedings{lee2021discriminative,
- title={Discriminative Reranking for Neural Machine Translation},
- author={Lee, Ann and Auli, Michael and Ranzato, Marc'Aurelio},
- booktitle={ACL},
- year={2021}
-}
-```
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/new/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_recognition/new/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/hubert/hubert_asr.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/hubert/hubert_asr.py
deleted file mode 100644
index dce899c9de3ab68341c0b21bea749a3ee29e0d8a..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/hubert/hubert_asr.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import contextlib
-from argparse import Namespace
-from typing import Any
-
-import torch
-import torch.nn as nn
-from dataclasses import dataclass, field
-from fairseq import checkpoint_utils, tasks, utils
-from fairseq.dataclass import FairseqDataclass
-from fairseq.dataclass.utils import convert_namespace_to_omegaconf
-from fairseq.models import BaseFairseqModel, FairseqEncoder, register_model
-from fairseq.models.hubert.hubert import MASKING_DISTRIBUTION_CHOICES
-from fairseq.tasks import FairseqTask
-from omegaconf import II, MISSING
-
-
-@dataclass
-class HubertAsrConfig(FairseqDataclass):
- w2v_path: str = field(
- default=MISSING, metadata={"help": "path to hubert model"}
- )
- no_pretrained_weights: bool = field(
- default=False,
- metadata={"help": "if true, does not load pretrained weights"},
- )
- dropout_input: float = field(
- default=0.0,
- metadata={"help": "dropout to apply to the input (after feat extr)"},
- )
- final_dropout: float = field(
- default=0.0,
- metadata={
- "help": "dropout after transformer and before final projection"
- },
- )
- dropout: float = field(
- default=0.0,
- metadata={"help": "dropout probability inside hubert model"},
- )
- attention_dropout: float = field(
- default=0.0,
- metadata={
- "help": "dropout probability for attention weights "
- "inside hubert model"
- },
- )
- activation_dropout: float = field(
- default=0.0,
- metadata={
- "help": "dropout probability after activation in FFN "
- "inside hubert model"
- },
- )
-
- # masking
- apply_mask: bool = field(
- default=False, metadata={"help": "apply masking during fine-tuning"}
- )
- mask_length: int = field(
- default=10, metadata={"help": "repeat the mask indices multiple times"}
- )
- mask_prob: float = field(
- default=0.5,
- metadata={
- "help": "probability of replacing a token with mask "
- "(normalized by length)"
- },
- )
- mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
- default="static", metadata={"help": "how to choose masks"}
- )
- mask_other: float = field(
- default=0,
- metadata={
- "help": "secondary mask argument "
- "(used for more complex distributions), "
- "see help in compute_mask_indices"
- },
- )
- no_mask_overlap: bool = field(
- default=False, metadata={"help": "whether to allow masks to overlap"}
- )
-
- # channel masking
- mask_channel_length: int = field(
- default=10,
- metadata={"help": "length of the mask for features (channels)"},
- )
- mask_channel_prob: float = field(
- default=0.0,
- metadata={"help": "probability of replacing a feature with 0"},
- )
- mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
- default="static",
- metadata={"help": "how to choose mask length for channel masking"},
- )
- mask_channel_other: float = field(
- default=0,
- metadata={
- "help": "secondary mask argument "
- "(used for more complex distributions), "
- "see help in compute_mask_indices"
- },
- )
- no_mask_channel_overlap: bool = field(
- default=False,
- metadata={"help": "whether to allow channel masks to overlap"},
- )
- freeze_finetune_updates: int = field(
- default=0,
- metadata={"help": "dont finetune hubert for this many updates"},
- )
- feature_grad_mult: float = field(
- default=0.0,
- metadata={"help": "reset feature grad mult in hubert to this"},
- )
- layerdrop: float = field(
- default=0.0,
- metadata={"help": "probability of dropping a layer in hubert"},
- )
- normalize: bool = II("task.normalize")
- data: str = II("task.data")
-
- # this holds the loaded hubert args
- w2v_args: Any = None
-
-
-@dataclass
-class HubertCtcConfig(HubertAsrConfig):
- pass
-
-
-@register_model("hubert_ctc", dataclass=HubertCtcConfig)
-class HubertCtc(BaseFairseqModel):
- def __init__(self, cfg: HubertCtcConfig, w2v_encoder: BaseFairseqModel):
- super().__init__()
- self.cfg = cfg
- self.w2v_encoder = w2v_encoder
-
- def upgrade_state_dict_named(self, state_dict, name):
- super().upgrade_state_dict_named(state_dict, name)
- return state_dict
-
- @classmethod
- def build_model(cls, cfg: HubertCtcConfig, task: FairseqTask):
- """Build a new model instance."""
- w2v_encoder = HubertEncoder(cfg, task.target_dictionary)
- return cls(cfg, w2v_encoder)
-
- def get_normalized_probs(self, net_output, log_probs):
- """Get normalized probabilities (or log probs) from a net's output."""
-
- logits = net_output["encoder_out"]
- if log_probs:
- return utils.log_softmax(logits.float(), dim=-1)
- else:
- return utils.softmax(logits.float(), dim=-1)
-
- def get_logits(self, net_output):
- logits = net_output["encoder_out"]
- padding = net_output["encoder_padding_mask"]
- if padding is not None and padding.any():
-            padding = padding.T
-            # chained boolean indexing returns a copy, so build the masking row
-            # once and write it back through a single indexed assignment
-            mask_value = logits.new_full((logits.size(-1),), float("-inf"))
-            mask_value[0] = 0
-            logits[padding] = mask_value
-
- return logits
-
- def forward(self, **kwargs):
- x = self.w2v_encoder(**kwargs)
- return x
-
-
-@dataclass
-class HubertSeq2SeqConfig(HubertAsrConfig):
- decoder_embed_dim: int = field(
- default=768, metadata={"help": "decoder embedding dimension"}
- )
- decoder_ffn_embed_dim: int = field(
- default=3072, metadata={"help": "decoder embedding dimension for FFN"}
- )
- decoder_layers: int = field(
- default=6, metadata={"help": "num of decoder layers"}
- )
- decoder_layerdrop: float = field(
- default=0.0, metadata={"help": "decoder layerdrop chance"}
- )
- decoder_attention_heads: int = field(
- default=4, metadata={"help": "num decoder attention heads"}
- )
- decoder_learned_pos: bool = field(
- default=False,
- metadata={"help": "use learned positional embeddings in the decoder"},
- )
- decoder_normalize_before: bool = field(
- default=False,
- metadata={"help": "apply layernorm before each decoder block"},
- )
- no_token_positional_embeddings: bool = field(
- default=False,
- metadata={
- "help": "if set, disables positional embeddings "
- "(outside self attention)"
- },
- )
- decoder_dropout: float = field(
- default=0.0, metadata={"help": "dropout probability in the decoder"}
- )
- decoder_attention_dropout: float = field(
- default=0.0,
- metadata={
- "help": "dropout probability for attention weights "
- "inside the decoder"
- },
- )
- decoder_activation_dropout: float = field(
- default=0.0,
- metadata={
- "help": "dropout probability after activation in FFN "
- "inside the decoder"
- },
- )
- max_target_positions: int = field(
- default=2048, metadata={"help": "max target positions"}
- )
- share_decoder_input_output_embed: bool = field(
- default=False,
- metadata={"help": "share decoder input and output embeddings"},
- )
-
-
-class HubertEncoder(FairseqEncoder):
- def __init__(self, cfg: HubertAsrConfig, tgt_dict=None):
- self.apply_mask = cfg.apply_mask
-
- arg_overrides = {
- "dropout": cfg.dropout,
- "activation_dropout": cfg.activation_dropout,
- "dropout_input": cfg.dropout_input,
- "attention_dropout": cfg.attention_dropout,
- "mask_length": cfg.mask_length,
- "mask_prob": cfg.mask_prob,
- "mask_selection": cfg.mask_selection,
- "mask_other": cfg.mask_other,
- "no_mask_overlap": cfg.no_mask_overlap,
- "mask_channel_length": cfg.mask_channel_length,
- "mask_channel_prob": cfg.mask_channel_prob,
- "mask_channel_selection": cfg.mask_channel_selection,
- "mask_channel_other": cfg.mask_channel_other,
- "no_mask_channel_overlap": cfg.no_mask_channel_overlap,
- "encoder_layerdrop": cfg.layerdrop,
- "feature_grad_mult": cfg.feature_grad_mult,
- }
-
- if cfg.w2v_args is None:
- state = checkpoint_utils.load_checkpoint_to_cpu(
- cfg.w2v_path, arg_overrides
- )
- w2v_args = state.get("cfg", None)
- if w2v_args is None:
- w2v_args = convert_namespace_to_omegaconf(state["args"])
- cfg.w2v_args = w2v_args
- else:
- state = None
- w2v_args = cfg.w2v_args
- if isinstance(w2v_args, Namespace):
- cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(
- w2v_args
- )
-
- assert cfg.normalize == w2v_args.task.normalize, (
- "Fine-tuning works best when data normalization is the same. "
- "Please check that --normalize is set or unset for "
- "both pre-training and here"
- )
-
- w2v_args.task.data = cfg.data
- task = tasks.setup_task(w2v_args.task)
- if state is not None and "task_state" in state:
- # This will load the stored "dictionaries" object
- task.load_state_dict(state["task_state"])
- model = task.build_model(w2v_args.model)
-
- if state is not None and not cfg.no_pretrained_weights:
- # set strict=False because we omit some modules
- model.load_state_dict(state["model"], strict=False)
-
- model.remove_pretraining_modules()
-
- super().__init__(task.source_dictionary)
-
- d = w2v_args.model.encoder_embed_dim
-
- self.w2v_model = model
-
- self.final_dropout = nn.Dropout(cfg.final_dropout)
- self.freeze_finetune_updates = cfg.freeze_finetune_updates
- self.num_updates = 0
-
- if tgt_dict is not None:
- self.proj = Linear(d, len(tgt_dict))
- elif getattr(cfg, "decoder_embed_dim", d) != d:
- self.proj = Linear(d, cfg.decoder_embed_dim)
- else:
- self.proj = None
-
- def set_num_updates(self, num_updates):
- """Set the number of parameters updates."""
- super().set_num_updates(num_updates)
- self.num_updates = num_updates
-
- def forward(self, source, padding_mask, tbc=True, **kwargs):
-
- w2v_args = {
- "source": source,
- "padding_mask": padding_mask,
- "mask": self.apply_mask and self.training,
- }
-
- ft = self.freeze_finetune_updates <= self.num_updates
-
- with torch.no_grad() if not ft else contextlib.ExitStack():
- x, padding_mask = self.w2v_model.extract_features(**w2v_args)
-
- if tbc:
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- x = self.final_dropout(x)
-
- if self.proj:
- x = self.proj(x)
-
- return {
- "encoder_out": x, # T x B x C
- "encoder_padding_mask": padding_mask, # B x T
- "padding_mask": padding_mask,
- }
-
- def reorder_encoder_out(self, encoder_out, new_order):
- if encoder_out["encoder_out"] is not None:
- encoder_out["encoder_out"] = encoder_out[
- "encoder_out"
- ].index_select(1, new_order)
- if encoder_out["encoder_padding_mask"] is not None:
- encoder_out["encoder_padding_mask"] = encoder_out[
- "encoder_padding_mask"
- ].index_select(0, new_order)
- return encoder_out
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- return None
-
- def upgrade_state_dict_named(self, state_dict, name):
- return state_dict
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
- nn.init.constant_(m.weight[padding_idx], 0)
- return m
-
-
-def Linear(in_features, out_features, bias=True):
- m = nn.Linear(in_features, out_features, bias)
- nn.init.xavier_uniform_(m.weight)
- if bias:
- nn.init.constant_(m.bias, 0.0)
- return m
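
As a usage illustration for the model above, the sketch below shows how `hubert_ctc` outputs could be turned into a greedy CTC transcription. It is a sketch only: it assumes a model and target dictionary already loaded through the usual fairseq task machinery, and it treats index 0 as the blank symbol, in line with the padding handling in `get_logits`.

```python
import torch


@torch.no_grad()
def greedy_ctc_decode(model, sample, target_dictionary, blank_idx=0):
    """Greedy CTC decoding for a HubertCtc model (schematic sketch)."""
    net_output = model(**sample["net_input"])             # encoder_out: T x B x C
    lprobs = model.get_normalized_probs(net_output, log_probs=True)
    preds = lprobs.argmax(dim=-1).transpose(0, 1)          # B x T

    transcriptions = []
    for seq in preds:
        tokens, prev = [], None
        for idx in seq.tolist():
            if idx != prev and idx != blank_idx:            # collapse repeats, drop blanks
                tokens.append(idx)
            prev = idx
        transcriptions.append(target_dictionary.string(torch.tensor(tokens)))
    return transcriptions
```
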
diff --git a/spaces/OPM-TECH/CompVis-stable-diffusion-v1-4/README.md b/spaces/OPM-TECH/CompVis-stable-diffusion-v1-4/README.md
deleted file mode 100644
index 646b66e4f260159572f083a109faa5af7cf38639..0000000000000000000000000000000000000000
--- a/spaces/OPM-TECH/CompVis-stable-diffusion-v1-4/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: CompVis Stable Diffusion V1 4
-emoji: 💻
-colorFrom: gray
-colorTo: gray
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/upsampling.py b/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/upsampling.py
deleted file mode 100644
index 78ae6dde07ee812aea646b3d3c310c4966058914..0000000000000000000000000000000000000000
--- a/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/upsampling.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from pathlib import Path
-
-import cv2
-from diffusers.utils import logging
-from huggingface_hub import hf_hub_download
-from PIL import Image
-from torch import nn
-
-try:
- from basicsr.archs.rrdbnet_arch import RRDBNet
- from realesrgan import RealESRGANer
-except ImportError as e:
- raise ImportError(
- "You tried to import realesrgan without having it installed properly. To install Real-ESRGAN, run:\n\n"
- "pip install realesrgan"
- )
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-class RealESRGANModel(nn.Module):
- def __init__(self, model_path, tile=0, tile_pad=10, pre_pad=0, fp32=False):
- super().__init__()
- try:
- from basicsr.archs.rrdbnet_arch import RRDBNet
- from realesrgan import RealESRGANer
- except ImportError as e:
- raise ImportError(
- "You tried to import realesrgan without having it installed properly. To install Real-ESRGAN, run:\n\n"
- "pip install realesrgan"
- )
-
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
- self.upsampler = RealESRGANer(
- scale=4, model_path=model_path, model=model, tile=tile, tile_pad=tile_pad, pre_pad=pre_pad, half=not fp32
- )
-
- def forward(self, image, outscale=4, convert_to_pil=True):
- """Upsample an image array or path.
- Args:
- image (Union[np.ndarray, str]): Either a np array or an image path. np array is assumed to be in RGB format,
- and we convert it to BGR.
- outscale (int, optional): Amount to upscale the image. Defaults to 4.
- convert_to_pil (bool, optional): If True, return PIL image. Otherwise, return numpy array (BGR). Defaults to True.
- Returns:
- Union[np.ndarray, PIL.Image.Image]: An upsampled version of the input image.
- """
- if isinstance(image, (str, Path)):
- img = cv2.imread(image, cv2.IMREAD_UNCHANGED)
- else:
- img = image
- img = (img * 255).round().astype("uint8")
- img = img[:, :, ::-1]
-
- image, _ = self.upsampler.enhance(img, outscale=outscale)
-
- if convert_to_pil:
- image = Image.fromarray(image[:, :, ::-1])
-
- return image
-
- @classmethod
- def from_pretrained(cls, model_name_or_path="nateraw/real-esrgan"):
- """Initialize a pretrained Real-ESRGAN upsampler.
- Example:
- ```python
- >>> from stable_diffusion_videos import PipelineRealESRGAN
- >>> pipe = PipelineRealESRGAN.from_pretrained('nateraw/real-esrgan')
- >>> im_out = pipe('input_img.jpg')
- ```
- Args:
- model_name_or_path (str, optional): The Hugging Face repo ID or path to local model. Defaults to 'nateraw/real-esrgan'.
- Returns:
- stable_diffusion_videos.PipelineRealESRGAN: An instance of `PipelineRealESRGAN` instantiated from pretrained model.
- """
-        # re-uploaded from the official ones mentioned here:
- # https://github.com/xinntao/Real-ESRGAN
- if Path(model_name_or_path).exists():
- file = model_name_or_path
- else:
- file = hf_hub_download(model_name_or_path, "RealESRGAN_x4plus.pth")
- return cls(file)
-
- def upsample_imagefolder(self, in_dir, out_dir, suffix="out", outfile_ext=".png", recursive=False, force=False):
- in_dir, out_dir = Path(in_dir), Path(out_dir)
- if not in_dir.exists():
- raise FileNotFoundError(f"Provided input directory {in_dir} does not exist")
-
- out_dir.mkdir(exist_ok=True, parents=True)
-
- generator = in_dir.rglob("*") if recursive else in_dir.glob("*")
- image_paths = [x for x in generator if x.suffix.lower() in [".png", ".jpg", ".jpeg"]]
- n_img = len(image_paths)
- for i, image in enumerate(image_paths):
- out_filepath = out_dir / (str(image.relative_to(in_dir).with_suffix("")) + suffix + outfile_ext)
- if not force and out_filepath.exists():
- logger.info(
- f"[{i}/{n_img}] {out_filepath} already exists, skipping. To avoid skipping, pass force=True."
- )
- continue
- logger.info(f"[{i}/{n_img}] upscaling {image}")
- im = self(str(image))
- out_filepath.parent.mkdir(parents=True, exist_ok=True)
- im.save(out_filepath)
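
A minimal usage sketch for the class above, assuming the file is importable as `upsampling`; the paths and repo ID shown are placeholders (the repo ID default also appears in `from_pretrained`).

```python
from upsampling import RealESRGANModel

# Load weights from the Hugging Face Hub (or pass a local .pth path instead).
upsampler = RealESRGANModel.from_pretrained("nateraw/real-esrgan")

# Upsample a single image file; a PIL.Image is returned by default.
frame = upsampler("frames/frame_0001.png")
frame.save("frames_up/frame_0001.png")

# Or upsample every .png/.jpg in a directory.
upsampler.upsample_imagefolder("frames", "frames_up")
```
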
diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/losses/fid/inception.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/losses/fid/inception.py
deleted file mode 100644
index e9bd0863b457aaa40c770eaa4acbb142b18fc18b..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/evaluation/losses/fid/inception.py
+++ /dev/null
@@ -1,323 +0,0 @@
-import logging
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torchvision import models
-
-try:
- from torchvision.models.utils import load_state_dict_from_url
-except ImportError:
- from torch.utils.model_zoo import load_url as load_state_dict_from_url
-
-# Inception weights ported to Pytorch from
-# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
-FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
-
-
-LOGGER = logging.getLogger(__name__)
-
-
-class InceptionV3(nn.Module):
- """Pretrained InceptionV3 network returning feature maps"""
-
- # Index of default block of inception to return,
- # corresponds to output of final average pooling
- DEFAULT_BLOCK_INDEX = 3
-
- # Maps feature dimensionality to their output blocks indices
- BLOCK_INDEX_BY_DIM = {
- 64: 0, # First max pooling features
-        192: 1,   # Second max pooling features
- 768: 2, # Pre-aux classifier features
- 2048: 3 # Final average pooling features
- }
-
- def __init__(self,
- output_blocks=[DEFAULT_BLOCK_INDEX],
- resize_input=True,
- normalize_input=True,
- requires_grad=False,
- use_fid_inception=True):
- """Build pretrained InceptionV3
-
- Parameters
- ----------
- output_blocks : list of int
- Indices of blocks to return features of. Possible values are:
- - 0: corresponds to output of first max pooling
- - 1: corresponds to output of second max pooling
- - 2: corresponds to output which is fed to aux classifier
- - 3: corresponds to output of final average pooling
- resize_input : bool
- If true, bilinearly resizes input to width and height 299 before
- feeding input to model. As the network without fully connected
- layers is fully convolutional, it should be able to handle inputs
- of arbitrary size, so resizing might not be strictly needed
- normalize_input : bool
- If true, scales the input from range (0, 1) to the range the
- pretrained Inception network expects, namely (-1, 1)
- requires_grad : bool
- If true, parameters of the model require gradients. Possibly useful
- for finetuning the network
- use_fid_inception : bool
- If true, uses the pretrained Inception model used in Tensorflow's
- FID implementation. If false, uses the pretrained Inception model
- available in torchvision. The FID Inception model has different
- weights and a slightly different structure from torchvision's
- Inception model. If you want to compute FID scores, you are
- strongly advised to set this parameter to true to get comparable
- results.
- """
- super(InceptionV3, self).__init__()
-
- self.resize_input = resize_input
- self.normalize_input = normalize_input
- self.output_blocks = sorted(output_blocks)
- self.last_needed_block = max(output_blocks)
-
- assert self.last_needed_block <= 3, \
- 'Last possible output block index is 3'
-
- self.blocks = nn.ModuleList()
-
- if use_fid_inception:
- inception = fid_inception_v3()
- else:
- inception = models.inception_v3(pretrained=True)
-
- # Block 0: input to maxpool1
- block0 = [
- inception.Conv2d_1a_3x3,
- inception.Conv2d_2a_3x3,
- inception.Conv2d_2b_3x3,
- nn.MaxPool2d(kernel_size=3, stride=2)
- ]
- self.blocks.append(nn.Sequential(*block0))
-
- # Block 1: maxpool1 to maxpool2
- if self.last_needed_block >= 1:
- block1 = [
- inception.Conv2d_3b_1x1,
- inception.Conv2d_4a_3x3,
- nn.MaxPool2d(kernel_size=3, stride=2)
- ]
- self.blocks.append(nn.Sequential(*block1))
-
- # Block 2: maxpool2 to aux classifier
- if self.last_needed_block >= 2:
- block2 = [
- inception.Mixed_5b,
- inception.Mixed_5c,
- inception.Mixed_5d,
- inception.Mixed_6a,
- inception.Mixed_6b,
- inception.Mixed_6c,
- inception.Mixed_6d,
- inception.Mixed_6e,
- ]
- self.blocks.append(nn.Sequential(*block2))
-
- # Block 3: aux classifier to final avgpool
- if self.last_needed_block >= 3:
- block3 = [
- inception.Mixed_7a,
- inception.Mixed_7b,
- inception.Mixed_7c,
- nn.AdaptiveAvgPool2d(output_size=(1, 1))
- ]
- self.blocks.append(nn.Sequential(*block3))
-
- for param in self.parameters():
- param.requires_grad = requires_grad
-
- def forward(self, inp):
- """Get Inception feature maps
-
- Parameters
- ----------
- inp : torch.autograd.Variable
- Input tensor of shape Bx3xHxW. Values are expected to be in
- range (0, 1)
-
- Returns
- -------
- List of torch.autograd.Variable, corresponding to the selected output
- block, sorted ascending by index
- """
- outp = []
- x = inp
-
- if self.resize_input:
- x = F.interpolate(x,
- size=(299, 299),
- mode='bilinear',
- align_corners=False)
-
- if self.normalize_input:
- x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
-
- for idx, block in enumerate(self.blocks):
- x = block(x)
- if idx in self.output_blocks:
- outp.append(x)
-
- if idx == self.last_needed_block:
- break
-
- return outp
-
-
-def fid_inception_v3():
- """Build pretrained Inception model for FID computation
-
- The Inception model for FID computation uses a different set of weights
- and has a slightly different structure than torchvision's Inception.
-
- This method first constructs torchvision's Inception and then patches the
- necessary parts that are different in the FID Inception model.
- """
- LOGGER.info('fid_inception_v3 called')
- inception = models.inception_v3(num_classes=1008,
- aux_logits=False,
- pretrained=False)
- LOGGER.info('models.inception_v3 done')
- inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
- inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
- inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
- inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
- inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
- inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
- inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
- inception.Mixed_7b = FIDInceptionE_1(1280)
- inception.Mixed_7c = FIDInceptionE_2(2048)
-
- LOGGER.info('fid_inception_v3 patching done')
-
- state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
- LOGGER.info('fid_inception_v3 weights downloaded')
-
- inception.load_state_dict(state_dict)
- LOGGER.info('fid_inception_v3 weights loaded into model')
-
- return inception
-
-
-class FIDInceptionA(models.inception.InceptionA):
- """InceptionA block patched for FID computation"""
- def __init__(self, in_channels, pool_features):
- super(FIDInceptionA, self).__init__(in_channels, pool_features)
-
- def forward(self, x):
- branch1x1 = self.branch1x1(x)
-
- branch5x5 = self.branch5x5_1(x)
- branch5x5 = self.branch5x5_2(branch5x5)
-
- branch3x3dbl = self.branch3x3dbl_1(x)
- branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
- branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
-
-        # Patch: Tensorflow's average pool does not use the padded zeros in
- # its average calculation
- branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
- count_include_pad=False)
- branch_pool = self.branch_pool(branch_pool)
-
- outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
- return torch.cat(outputs, 1)
-
-
-class FIDInceptionC(models.inception.InceptionC):
- """InceptionC block patched for FID computation"""
- def __init__(self, in_channels, channels_7x7):
- super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
-
- def forward(self, x):
- branch1x1 = self.branch1x1(x)
-
- branch7x7 = self.branch7x7_1(x)
- branch7x7 = self.branch7x7_2(branch7x7)
- branch7x7 = self.branch7x7_3(branch7x7)
-
- branch7x7dbl = self.branch7x7dbl_1(x)
- branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
- branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
- branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
- branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
-
-        # Patch: Tensorflow's average pool does not use the padded zeros in
- # its average calculation
- branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
- count_include_pad=False)
- branch_pool = self.branch_pool(branch_pool)
-
- outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
- return torch.cat(outputs, 1)
-
-
-class FIDInceptionE_1(models.inception.InceptionE):
- """First InceptionE block patched for FID computation"""
- def __init__(self, in_channels):
- super(FIDInceptionE_1, self).__init__(in_channels)
-
- def forward(self, x):
- branch1x1 = self.branch1x1(x)
-
- branch3x3 = self.branch3x3_1(x)
- branch3x3 = [
- self.branch3x3_2a(branch3x3),
- self.branch3x3_2b(branch3x3),
- ]
- branch3x3 = torch.cat(branch3x3, 1)
-
- branch3x3dbl = self.branch3x3dbl_1(x)
- branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
- branch3x3dbl = [
- self.branch3x3dbl_3a(branch3x3dbl),
- self.branch3x3dbl_3b(branch3x3dbl),
- ]
- branch3x3dbl = torch.cat(branch3x3dbl, 1)
-
-        # Patch: Tensorflow's average pool does not use the padded zeros in
- # its average calculation
- branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
- count_include_pad=False)
- branch_pool = self.branch_pool(branch_pool)
-
- outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
- return torch.cat(outputs, 1)
-
-
-class FIDInceptionE_2(models.inception.InceptionE):
- """Second InceptionE block patched for FID computation"""
- def __init__(self, in_channels):
- super(FIDInceptionE_2, self).__init__(in_channels)
-
- def forward(self, x):
- branch1x1 = self.branch1x1(x)
-
- branch3x3 = self.branch3x3_1(x)
- branch3x3 = [
- self.branch3x3_2a(branch3x3),
- self.branch3x3_2b(branch3x3),
- ]
- branch3x3 = torch.cat(branch3x3, 1)
-
- branch3x3dbl = self.branch3x3dbl_1(x)
- branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
- branch3x3dbl = [
- self.branch3x3dbl_3a(branch3x3dbl),
- self.branch3x3dbl_3b(branch3x3dbl),
- ]
- branch3x3dbl = torch.cat(branch3x3dbl, 1)
-
- # Patch: The FID Inception model uses max pooling instead of average
- # pooling. This is likely an error in this specific Inception
- # implementation, as other Inception models use average pooling here
- # (which matches the description in the paper).
- branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
- branch_pool = self.branch_pool(branch_pool)
-
- outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
- return torch.cat(outputs, 1)
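
To make the intended use concrete, here is a short sketch of extracting the 2048-dimensional pooled features that FID statistics are computed from, assuming the file is importable as `inception`; the batch is a random placeholder, and instantiating the model downloads the FID weights on first use.

```python
import torch

from inception import InceptionV3

# Select the final-average-pool block (2048-d features) used for FID.
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
model = InceptionV3(output_blocks=[block_idx]).eval()

# Placeholder batch: 8 RGB images with values in (0, 1).
images = torch.rand(8, 3, 299, 299)

with torch.no_grad():
    features = model(images)[0]               # N x 2048 x 1 x 1
features = features.squeeze(-1).squeeze(-1)   # N x 2048, ready for mean/covariance stats
```
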
diff --git a/spaces/PSLD/PSLD/stable-diffusion/debug/generate.sh b/spaces/PSLD/PSLD/stable-diffusion/debug/generate.sh
deleted file mode 100644
index 5535a44761cd43aa061066fcec3aef87e77b5e3c..0000000000000000000000000000000000000000
--- a/spaces/PSLD/PSLD/stable-diffusion/debug/generate.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-export CUDA_VISIBLE_DEVICES='2'
-python scripts/inverse.py \
- --file_id='00019.png' \
- --task_config='configs/motion_deblur_config.yaml' \
- --inpainting=0 \
- --general_inverse=0 \
- --gamma=1e-1 \
- --omega=1e-1 \
- --W=256 \
- --H=256 \
- --scale=5.0 \
- --laion400m \
- --prompt="a photograph of fantasy landscape trending in art station" \
- --outdir="outputs/txt2img-samples-laion400m"
\ No newline at end of file
diff --git a/spaces/PYTHONOPTIC/FOCUSGUMMY/PYTHON.py b/spaces/PYTHONOPTIC/FOCUSGUMMY/PYTHON.py
deleted file mode 100644
index 2490ce1ae92a71d70d50d6127dc6adc3c1937490..0000000000000000000000000000000000000000
--- a/spaces/PYTHONOPTIC/FOCUSGUMMY/PYTHON.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-
-import openai
-import gradio
-
-openai.api_key = os.getenv("OPENAI_API_KEY")  # read the key from the environment instead of hardcoding it
-
-messages = [{"role": "system", "content": "You are an expert in high precision sport optics and can answer anyquestions related to binoculars, telescopes, monoculars and spotting scopes"}]
-
-def CustomChatGPT(user_input):
- messages.append({"role": "user", "content": user_input})
- response = openai.ChatCompletion.create(
- model = "gpt-3.5-turbo",
- messages = messages
- )
- ChatGPT_reply = response["choices"][0]["message"]["content"]
- messages.append({"role": "assistant", "content": ChatGPT_reply})
- return ChatGPT_reply
-
-demo = gradio.Interface(fn=CustomChatGPT, inputs = "text", outputs = "text", title = "Ask me anything about optics.")
-
-demo.launch(share=True)
\ No newline at end of file
diff --git a/spaces/Parthjain9925/DigitRecognizer/README.md b/spaces/Parthjain9925/DigitRecognizer/README.md
deleted file mode 100644
index 625a18678aad636bcae5f8cdce87035cee933154..0000000000000000000000000000000000000000
--- a/spaces/Parthjain9925/DigitRecognizer/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: DigitRecognizer
-emoji: 📊
-colorFrom: gray
-colorTo: gray
-sdk: gradio
-sdk_version: 3.1.4
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/PascalLiu/FNeVR_demo/augmentation.py b/spaces/PascalLiu/FNeVR_demo/augmentation.py
deleted file mode 100644
index 50d03203aaec2a59fb2671bdeccfae1d214f607c..0000000000000000000000000000000000000000
--- a/spaces/PascalLiu/FNeVR_demo/augmentation.py
+++ /dev/null
@@ -1,345 +0,0 @@
-"""
-Code from https://github.com/hassony2/torch_videovision
-"""
-
-import numbers
-
-import random
-import numpy as np
-import PIL
-
-from skimage.transform import resize, rotate
-from numpy import pad  # skimage.util.pad is no longer available in recent scikit-image; numpy.pad is a drop-in replacement
-import torchvision
-
-import warnings
-
-from skimage import img_as_ubyte, img_as_float
-
-
-def crop_clip(clip, min_h, min_w, h, w):
- if isinstance(clip[0], np.ndarray):
- cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
-
- elif isinstance(clip[0], PIL.Image.Image):
- cropped = [
- img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
- ]
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
-                        ' but got list of {0}'.format(type(clip[0])))
- return cropped
-
-
-def pad_clip(clip, h, w):
- im_h, im_w = clip[0].shape[:2]
- pad_h = (0, 0) if h < im_h else ((h - im_h) // 2, (h - im_h + 1) // 2)
- pad_w = (0, 0) if w < im_w else ((w - im_w) // 2, (w - im_w + 1) // 2)
-
- return pad(clip, ((0, 0), pad_h, pad_w, (0, 0)), mode='edge')
-
-
-def resize_clip(clip, size, interpolation='bilinear'):
- if isinstance(clip[0], np.ndarray):
- if isinstance(size, numbers.Number):
- im_h, im_w, im_c = clip[0].shape
- # Min spatial dim already matches minimal size
- if (im_w <= im_h and im_w == size) or (im_h <= im_w
- and im_h == size):
- return clip
- new_h, new_w = get_resize_sizes(im_h, im_w, size)
- size = (new_w, new_h)
- else:
- size = size[1], size[0]
-
- scaled = [
- resize(img, size, order=1 if interpolation == 'bilinear' else 0, preserve_range=True,
- mode='constant', anti_aliasing=True) for img in clip
- ]
- elif isinstance(clip[0], PIL.Image.Image):
- if isinstance(size, numbers.Number):
- im_w, im_h = clip[0].size
- # Min spatial dim already matches minimal size
- if (im_w <= im_h and im_w == size) or (im_h <= im_w
- and im_h == size):
- return clip
- new_h, new_w = get_resize_sizes(im_h, im_w, size)
- size = (new_w, new_h)
- else:
- size = size[1], size[0]
- if interpolation == 'bilinear':
-                pil_inter = PIL.Image.BILINEAR
-            else:
-                pil_inter = PIL.Image.NEAREST
- scaled = [img.resize(size, pil_inter) for img in clip]
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
-                        ' but got list of {0}'.format(type(clip[0])))
- return scaled
-
-
-def get_resize_sizes(im_h, im_w, size):
- if im_w < im_h:
- ow = size
- oh = int(size * im_h / im_w)
- else:
- oh = size
- ow = int(size * im_w / im_h)
- return oh, ow
-
-
-class RandomFlip(object):
- def __init__(self, time_flip=False, horizontal_flip=False):
- self.time_flip = time_flip
- self.horizontal_flip = horizontal_flip
-
- def __call__(self, clip):
- if random.random() < 0.5 and self.time_flip:
- return clip[::-1]
- if random.random() < 0.5 and self.horizontal_flip:
- return [np.fliplr(img) for img in clip]
-
- return clip
-
-
-class RandomResize(object):
- """Resizes a list of (H x W x C) numpy.ndarray to the final size
-    The larger the original image is, the longer interpolation takes.
-    Args:
-    interpolation (str): Can be one of 'nearest', 'bilinear'
-    defaults to nearest
-    size (tuple): (width, height)
- """
-
- def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
- self.ratio = ratio
- self.interpolation = interpolation
-
- def __call__(self, clip):
- scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
-
- if isinstance(clip[0], np.ndarray):
- im_h, im_w, im_c = clip[0].shape
- elif isinstance(clip[0], PIL.Image.Image):
- im_w, im_h = clip[0].size
-
- new_w = int(im_w * scaling_factor)
- new_h = int(im_h * scaling_factor)
- new_size = (new_w, new_h)
- resized = resize_clip(
- clip, new_size, interpolation=self.interpolation)
-
- return resized
-
-
-class RandomCrop(object):
- """Extract random crop at the same location for a list of videos
- Args:
- size (sequence or int): Desired output size for the
- crop in format (h, w)
- """
-
- def __init__(self, size):
- if isinstance(size, numbers.Number):
- size = (size, size)
-
- self.size = size
-
- def __call__(self, clip):
- """
- Args:
- img (PIL.Image or numpy.ndarray): List of videos to be cropped
- in format (h, w, c) in numpy.ndarray
- Returns:
- PIL.Image or numpy.ndarray: Cropped list of videos
- """
- h, w = self.size
- if isinstance(clip[0], np.ndarray):
- im_h, im_w, im_c = clip[0].shape
- elif isinstance(clip[0], PIL.Image.Image):
- im_w, im_h = clip[0].size
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
-                            ' but got list of {0}'.format(type(clip[0])))
-
- clip = pad_clip(clip, h, w)
- im_h, im_w = clip.shape[1:3]
-        x1 = 0 if w == im_w else random.randint(0, im_w - w)
-        y1 = 0 if h == im_h else random.randint(0, im_h - h)
- cropped = crop_clip(clip, y1, x1, h, w)
-
- return cropped
-
-
-class RandomRotation(object):
- """Rotate entire clip randomly by a random angle within
- given bounds
- Args:
- degrees (sequence or int): Range of degrees to select from
- If degrees is a number instead of sequence like (min, max),
- the range of degrees, will be (-degrees, +degrees).
- """
-
- def __init__(self, degrees):
- if isinstance(degrees, numbers.Number):
- if degrees < 0:
- raise ValueError('If degrees is a single number,'
- 'must be positive')
- degrees = (-degrees, degrees)
- else:
- if len(degrees) != 2:
- raise ValueError('If degrees is a sequence,'
- 'it must be of len 2.')
-
- self.degrees = degrees
-
- def __call__(self, clip):
- """
- Args:
- img (PIL.Image or numpy.ndarray): List of videos to be cropped
- in format (h, w, c) in numpy.ndarray
- Returns:
- PIL.Image or numpy.ndarray: Cropped list of videos
- """
- angle = random.uniform(self.degrees[0], self.degrees[1])
- if isinstance(clip[0], np.ndarray):
- rotated = [rotate(image=img, angle=angle, preserve_range=True) for img in clip]
- elif isinstance(clip[0], PIL.Image.Image):
- rotated = [img.rotate(angle) for img in clip]
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
-                            ' but got list of {0}'.format(type(clip[0])))
-
- return rotated
-
-
-class ColorJitter(object):
- """Randomly change the brightness, contrast and saturation and hue of the clip
- Args:
- brightness (float): How much to jitter brightness. brightness_factor
- is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
- contrast (float): How much to jitter contrast. contrast_factor
- is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
- saturation (float): How much to jitter saturation. saturation_factor
- is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
- hue(float): How much to jitter hue. hue_factor is chosen uniformly from
- [-hue, hue]. Should be >=0 and <= 0.5.
- """
-
- def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
- self.brightness = brightness
- self.contrast = contrast
- self.saturation = saturation
- self.hue = hue
-
- def get_params(self, brightness, contrast, saturation, hue):
- if brightness > 0:
- brightness_factor = random.uniform(
- max(0, 1 - brightness), 1 + brightness)
- else:
- brightness_factor = None
-
- if contrast > 0:
- contrast_factor = random.uniform(
- max(0, 1 - contrast), 1 + contrast)
- else:
- contrast_factor = None
-
- if saturation > 0:
- saturation_factor = random.uniform(
- max(0, 1 - saturation), 1 + saturation)
- else:
- saturation_factor = None
-
- if hue > 0:
- hue_factor = random.uniform(-hue, hue)
- else:
- hue_factor = None
- return brightness_factor, contrast_factor, saturation_factor, hue_factor
-
- def __call__(self, clip):
- """
- Args:
- clip (list): list of PIL.Image
- Returns:
- list PIL.Image : list of transformed PIL.Image
- """
- if isinstance(clip[0], np.ndarray):
- brightness, contrast, saturation, hue = self.get_params(
- self.brightness, self.contrast, self.saturation, self.hue)
-
- # Create img transform function sequence
- img_transforms = []
- if brightness is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
- if saturation is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
- if hue is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
- if contrast is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
- random.shuffle(img_transforms)
- img_transforms = [img_as_ubyte, torchvision.transforms.ToPILImage()] + img_transforms + [np.array,
- img_as_float]
-
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- jittered_clip = []
- for img in clip:
- jittered_img = img
- for func in img_transforms:
- jittered_img = func(jittered_img)
- jittered_clip.append(jittered_img.astype('float32'))
- elif isinstance(clip[0], PIL.Image.Image):
- brightness, contrast, saturation, hue = self.get_params(
- self.brightness, self.contrast, self.saturation, self.hue)
-
- # Create img transform function sequence
- img_transforms = []
- if brightness is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
- if saturation is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
- if hue is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
- if contrast is not None:
- img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
- random.shuffle(img_transforms)
-
- # Apply to all videos
- jittered_clip = []
-            for img in clip:
-                jittered_img = img
-                for func in img_transforms:
-                    jittered_img = func(jittered_img)
-                jittered_clip.append(jittered_img)
-
- else:
- raise TypeError('Expected numpy.ndarray or PIL.Image' +
-                            ' but got list of {0}'.format(type(clip[0])))
- return jittered_clip
-
-
-class AllAugmentationTransform:
- def __init__(self, resize_param=None, rotation_param=None, flip_param=None, crop_param=None, jitter_param=None):
- self.transforms = []
-
- if flip_param is not None:
- self.transforms.append(RandomFlip(**flip_param))
-
- if rotation_param is not None:
- self.transforms.append(RandomRotation(**rotation_param))
-
- if resize_param is not None:
- self.transforms.append(RandomResize(**resize_param))
-
- if crop_param is not None:
- self.transforms.append(RandomCrop(**crop_param))
-
- if jitter_param is not None:
- self.transforms.append(ColorJitter(**jitter_param))
-
- def __call__(self, clip):
- for t in self.transforms:
- clip = t(clip)
- return clip
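
For reference, a small sketch of how this pipeline is typically wired together, assuming the file is importable as `augmentation`; the parameter values are illustrative rather than taken from any particular training config. Each `*_param` dict is unpacked into the corresponding transform, and the composed object is applied to a clip, i.e. a list of H x W x C frames.

```python
import numpy as np

from augmentation import AllAugmentationTransform

# Illustrative parameters; real values come from the training config.
augment = AllAugmentationTransform(
    flip_param={"horizontal_flip": True, "time_flip": True},
    jitter_param={"brightness": 0.1, "contrast": 0.1, "saturation": 0.1, "hue": 0.1},
)

# A clip is a list of float frames in [0, 1], shaped H x W x C.
clip = [np.random.rand(256, 256, 3).astype("float32") for _ in range(16)]
augmented = augment(clip)
```
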
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/repl/coop-server.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/repl/coop-server.go
deleted file mode 100644
index 39ea0a63fd23c3f6112c44e1286f5794982e97c3..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/repl/coop-server.go and /dev/null differ
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/vm/vm.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/vm/vm.go
deleted file mode 100644
index 6b7d8c6201e88eaa6c5f5f309a9f47a1c2da09a2..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/vm/vm.go and /dev/null differ
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/color.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/color.go
deleted file mode 100644
index 9f2972a5457284ff0ac6d917aaf0dfc01c2f6e5f..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/color.go and /dev/null differ
diff --git a/spaces/PeepDaSlan9/rvc-models/app.py b/spaces/PeepDaSlan9/rvc-models/app.py
deleted file mode 100644
index 5ef3bed52089af1afd7b5edcf72721d92b2bbbe0..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/rvc-models/app.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import os
-import json
-import argparse
-import traceback
-import logging
-import gradio as gr
-import numpy as np
-import librosa
-import torch
-import asyncio
-import edge_tts
-from datetime import datetime
-from fairseq import checkpoint_utils
-from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
-from vc_infer_pipeline import VC
-from config import (
- is_half,
- device
-)
-logging.getLogger("numba").setLevel(logging.WARNING)
-limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces
-
-def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy):
- def vc_fn(
- input_audio,
- f0_up_key,
- f0_method,
- index_rate,
- tts_mode,
- tts_text,
- tts_voice
- ):
- try:
- if tts_mode:
- if len(tts_text) > 100 and limitation:
- return "Text is too long", None
- if tts_text is None or tts_voice is None:
- return "You need to enter text and select a voice", None
- asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
- audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
- else:
- if args.files:
- audio, sr = librosa.load(input_audio, sr=16000, mono=True)
- else:
- if input_audio is None:
- return "You need to upload an audio", None
- sampling_rate, audio = input_audio
- duration = audio.shape[0] / sampling_rate
- if duration > 20 and limitation:
- return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
- audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
- if len(audio.shape) > 1:
- audio = librosa.to_mono(audio.transpose(1, 0))
- if sampling_rate != 16000:
- audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
- times = [0, 0, 0]
- f0_up_key = int(f0_up_key)
- audio_opt = vc.pipeline(
- hubert_model,
- net_g,
- 0,
- audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- file_big_npy,
- index_rate,
- if_f0,
- )
- print(
- f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
- )
- return "Success", (tgt_sr, audio_opt)
- except:
- info = traceback.format_exc()
- print(info)
- return info, (None, None)
- return vc_fn
-
-def load_hubert():
- global hubert_model
- models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
- ["hubert_base.pt"],
- suffix="",
- )
- hubert_model = models[0]
- hubert_model = hubert_model.to(device)
- if is_half:
- hubert_model = hubert_model.half()
- else:
- hubert_model = hubert_model.float()
- hubert_model.eval()
-
-def change_to_tts_mode(tts_mode):
- if tts_mode:
- return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
- else:
- return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--api', action="store_true", default=False)
- parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
- parser.add_argument("--files", action="store_true", default=False, help="load audio from path")
- args, unknown = parser.parse_known_args()
- load_hubert()
- models = []
- tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
- voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
- with open("weights/model_info.json", "r", encoding="utf-8") as f:
- models_info = json.load(f)
- for name, info in models_info.items():
- if not info['enable']:
- continue
- title = info['title']
- author = info.get("author", None)
- cover = f"weights/{name}/{info['cover']}"
- index = f"weights/{name}/{info['feature_retrieval_library']}"
- npy = f"weights/{name}/{info['feature_file']}"
- cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu")
- tgt_sr = cpt["config"][-1]
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
- if_f0 = cpt.get("f0", 1)
- if if_f0 == 1:
- net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
- else:
- net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
- del net_g.enc_q
-        print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the state is not cleaned up properly; odd, but required
- net_g.eval().to(device)
- if is_half:
- net_g = net_g.half()
- else:
- net_g = net_g.float()
- vc = VC(tgt_sr, device, is_half)
- models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy)))
- with gr.Blocks() as app:
- gr.Markdown(
- "#
RVC Models\n"
- "##
The input audio should be clean and pure voice without background music.\n"
- "\n\n"
- "[](https://colab.research.google.com/drive/16MXRcKEjGDqQzVanvi8xYOOOlhdNBopM?usp=share_link)\n\n"
- "[](https://huggingface.co/spaces/zomehwh/rvc-models?duplicate=true)\n\n"
- "[](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
-
- )
- with gr.Tabs():
- for (name, title, author, cover, vc_fn) in models:
- with gr.TabItem(name):
- with gr.Row():
- gr.Markdown(
-                            f'{title}\n' +
-                            (f'Model author: {author}' if author else "") +
-                            (f'' if cover else "")
-                        )
- with gr.Row():
- with gr.Column():
- if args.files:
- vc_input = gr.Textbox(label="Input audio path")
- else:
-                            vc_input = gr.Audio(label="Input audio" + (' (less than 20 seconds)' if limitation else ''))
- vc_transpose = gr.Number(label="Transpose", value=0)
- vc_f0method = gr.Radio(
- label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies",
- choices=["pm", "harvest"],
- value="pm",
- interactive=True,
- )
- vc_index_ratio = gr.Slider(
- minimum=0,
- maximum=1,
- label="Retrieval feature ratio",
- value=0.6,
- interactive=True,
- )
- tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
- tts_text = gr.Textbox(visible=False,label="TTS text (100 words limitation)" if limitation else "TTS text")
- tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
- vc_submit = gr.Button("Generate", variant="primary")
- with gr.Column():
- vc_output1 = gr.Textbox(label="Output Message")
- vc_output2 = gr.Audio(label="Output Audio")
- vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2])
- tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice])
- app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.share)
\ No newline at end of file
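
The loader loop above expects a `weights/model_info.json` with one entry per model. A hypothetical minimal entry, inferred only from the keys the loop reads, is sketched below as a Python literal; every name and filename is a placeholder.

```python
# Hypothetical weights/model_info.json contents, shown as a Python literal.
models_info = {
    "my-voice": {
        "enable": True,
        "title": "My Voice (RVC)",
        "author": "someone",
        "cover": "cover.png",                        # resolved to weights/my-voice/cover.png
        "feature_retrieval_library": "added.index",  # resolved to weights/my-voice/added.index
        "feature_file": "total_fea.npy",             # resolved to weights/my-voice/total_fea.npy
    },
    # the checkpoint itself is expected at weights/my-voice/my-voice.pth
}
```
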
diff --git a/spaces/Pengyey/bingo-chuchu/src/components/chat-scroll-anchor.tsx b/spaces/Pengyey/bingo-chuchu/src/components/chat-scroll-anchor.tsx
deleted file mode 100644
index ac809f4486a48e134cb69314c3d0dae5e68d614e..0000000000000000000000000000000000000000
--- a/spaces/Pengyey/bingo-chuchu/src/components/chat-scroll-anchor.tsx
+++ /dev/null
@@ -1,29 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import { useInView } from 'react-intersection-observer'
-
-import { useAtBottom } from '@/lib/hooks/use-at-bottom'
-
-interface ChatScrollAnchorProps {
- trackVisibility?: boolean
-}
-
-export function ChatScrollAnchor({ trackVisibility }: ChatScrollAnchorProps) {
- const isAtBottom = useAtBottom()
- const { ref, entry, inView } = useInView({
- trackVisibility,
- delay: 100,
- rootMargin: '0px 0px -150px 0px'
- })
-
- React.useEffect(() => {
- if (isAtBottom && trackVisibility && !inView) {
- entry?.target.scrollIntoView({
- block: 'start'
- })
- }
- }, [inView, entry, isAtBottom, trackVisibility])
-
-  return <div ref={ref} className="h-px w-full" />
-}
diff --git a/spaces/PrajwalS/GODEL-Demo-nxt/README.md b/spaces/PrajwalS/GODEL-Demo-nxt/README.md
deleted file mode 100644
index 9b8f978eb19fa76b83ab7a1424e538d3daed5677..0000000000000000000000000000000000000000
--- a/spaces/PrajwalS/GODEL-Demo-nxt/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: GODEL Demo
-emoji: 🐠
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: microsoft/GODEL-Demo
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/QinQiuFox/get_ppt/README.md b/spaces/QinQiuFox/get_ppt/README.md
deleted file mode 100644
index 4736fd20e8ff8cd6b22c0dc7bd1972493de7d2af..0000000000000000000000000000000000000000
--- a/spaces/QinQiuFox/get_ppt/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Get Ppt
-emoji: 📈
-colorFrom: red
-colorTo: yellow
-sdk: static
-pinned: false
-license: afl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Ralmao/glass_py/app.py b/spaces/Ralmao/glass_py/app.py
deleted file mode 100644
index b0f11d12693e2137ec20d8a2fd411bb3544fec3d..0000000000000000000000000000000000000000
--- a/spaces/Ralmao/glass_py/app.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import pandas as pd
-import numpy as np
-import sklearn as sn
-from datasets import Dataset
-
-df=pd.read_csv("https://huggingface.co/spaces/Ralmao/glass_py/raw/main/glass.csv", on_bad_lines='skip')
-dataset= Dataset.from_pandas(df)
-X = df.drop(['Type'], axis = 1)
-y = df['Type']
-
-# Import the libraries needed to build the model
-from sklearn.model_selection import train_test_split
-
-# 30% for test and 70% for train
-X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.30, random_state = 42)
-
-from sklearn.ensemble import RandomForestClassifier
-
-rf = RandomForestClassifier(n_estimators=10)
-rf.fit(X_train, y_train)
-
-
-import gradio as gr
-
-def predict_GlassType(RI,Na,Al,Mg,Si,K,Ca,Ba,Fe):
- x = np.array([RI,Na,Al,Mg,Si,K,Ca,Ba,Fe])
- pred = rf.predict(x.reshape(1, -1))
- return pred[0]
-
-RI = gr.inputs.Number(label='RI')
-Na = gr.inputs.Number(label='Na')
-Al = gr.inputs.Number(label='Al')
-Mg = gr.inputs.Number(label='Mg')
-Si = gr.inputs.Number(label='Si')
-K = gr.inputs.Number(label='K')
-Ca = gr.inputs.Number(label='Ca ')
-Ba = gr.inputs.Number(label='Ba')
-Fe = gr.inputs.Number(label='Fe')
-output = gr.outputs.Textbox(label='Glass Type')
-
-
-app = gr.Interface(predict_GlassType, inputs=[RI,Na,Al,Mg,Si,K,Ca,Ba,Fe], outputs=output, description='This is a Glass Type Predictor')
-app.launch()
-
-
diff --git a/spaces/RamAnanth1/T2I-Adapter/README.md b/spaces/RamAnanth1/T2I-Adapter/README.md
deleted file mode 100644
index 7cd49f43536fdb6ac1cb06b7f6061cb6347defa2..0000000000000000000000000000000000000000
--- a/spaces/RamAnanth1/T2I-Adapter/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: T2I Adapter
-emoji: 🏢
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
-tags:
- - making-demos
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/commands/cache.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/commands/cache.py
deleted file mode 100644
index c5f03302d6b03989e900cb6786946ffd01968798..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/commands/cache.py
+++ /dev/null
@@ -1,223 +0,0 @@
-import os
-import textwrap
-from optparse import Values
-from typing import Any, List
-
-import pip._internal.utils.filesystem as filesystem
-from pip._internal.cli.base_command import Command
-from pip._internal.cli.status_codes import ERROR, SUCCESS
-from pip._internal.exceptions import CommandError, PipError
-from pip._internal.utils.logging import getLogger
-
-logger = getLogger(__name__)
-
-
-class CacheCommand(Command):
- """
- Inspect and manage pip's wheel cache.
-
- Subcommands:
-
- - dir: Show the cache directory.
- - info: Show information about the cache.
- - list: List filenames of packages stored in the cache.
-    - remove: Remove one or more packages from the cache.
-    - purge: Remove all items from the cache.
-
-    ``<pattern>`` can be a glob expression or a package name.
- """
-
- ignore_require_venv = True
- usage = """
- %prog dir
- %prog info
-        %prog list [<pattern>] [--format=[human, abspath]]
-        %prog remove <pattern>
- %prog purge
- """
-
- def add_options(self) -> None:
-
- self.cmd_opts.add_option(
- "--format",
- action="store",
- dest="list_format",
- default="human",
- choices=("human", "abspath"),
- help="Select the output format among: human (default) or abspath",
- )
-
- self.parser.insert_option_group(0, self.cmd_opts)
-
- def run(self, options: Values, args: List[str]) -> int:
- handlers = {
- "dir": self.get_cache_dir,
- "info": self.get_cache_info,
- "list": self.list_cache_items,
- "remove": self.remove_cache_items,
- "purge": self.purge_cache,
- }
-
- if not options.cache_dir:
- logger.error("pip cache commands can not function since cache is disabled.")
- return ERROR
-
- # Determine action
- if not args or args[0] not in handlers:
- logger.error(
- "Need an action (%s) to perform.",
- ", ".join(sorted(handlers)),
- )
- return ERROR
-
- action = args[0]
-
- # Error handling happens here, not in the action-handlers.
- try:
- handlers[action](options, args[1:])
- except PipError as e:
- logger.error(e.args[0])
- return ERROR
-
- return SUCCESS
-
- def get_cache_dir(self, options: Values, args: List[Any]) -> None:
- if args:
- raise CommandError("Too many arguments")
-
- logger.info(options.cache_dir)
-
- def get_cache_info(self, options: Values, args: List[Any]) -> None:
- if args:
- raise CommandError("Too many arguments")
-
- num_http_files = len(self._find_http_files(options))
- num_packages = len(self._find_wheels(options, "*"))
-
- http_cache_location = self._cache_dir(options, "http")
- wheels_cache_location = self._cache_dir(options, "wheels")
- http_cache_size = filesystem.format_directory_size(http_cache_location)
- wheels_cache_size = filesystem.format_directory_size(wheels_cache_location)
-
- message = (
- textwrap.dedent(
- """
- Package index page cache location: {http_cache_location}
- Package index page cache size: {http_cache_size}
- Number of HTTP files: {num_http_files}
- Locally built wheels location: {wheels_cache_location}
- Locally built wheels size: {wheels_cache_size}
- Number of locally built wheels: {package_count}
- """
- )
- .format(
- http_cache_location=http_cache_location,
- http_cache_size=http_cache_size,
- num_http_files=num_http_files,
- wheels_cache_location=wheels_cache_location,
- package_count=num_packages,
- wheels_cache_size=wheels_cache_size,
- )
- .strip()
- )
-
- logger.info(message)
-
- def list_cache_items(self, options: Values, args: List[Any]) -> None:
- if len(args) > 1:
- raise CommandError("Too many arguments")
-
- if args:
- pattern = args[0]
- else:
- pattern = "*"
-
- files = self._find_wheels(options, pattern)
- if options.list_format == "human":
- self.format_for_human(files)
- else:
- self.format_for_abspath(files)
-
- def format_for_human(self, files: List[str]) -> None:
- if not files:
- logger.info("No locally built wheels cached.")
- return
-
- results = []
- for filename in files:
- wheel = os.path.basename(filename)
- size = filesystem.format_file_size(filename)
- results.append(f" - {wheel} ({size})")
- logger.info("Cache contents:\n")
- logger.info("\n".join(sorted(results)))
-
- def format_for_abspath(self, files: List[str]) -> None:
- if not files:
- return
-
- results = []
- for filename in files:
- results.append(filename)
-
- logger.info("\n".join(sorted(results)))
-
- def remove_cache_items(self, options: Values, args: List[Any]) -> None:
- if len(args) > 1:
- raise CommandError("Too many arguments")
-
- if not args:
- raise CommandError("Please provide a pattern")
-
- files = self._find_wheels(options, args[0])
-
- no_matching_msg = "No matching packages"
- if args[0] == "*":
- # Only fetch http files if no specific pattern given
- files += self._find_http_files(options)
- else:
- # Add the pattern to the log message
- no_matching_msg += ' for pattern "{}"'.format(args[0])
-
- if not files:
- logger.warning(no_matching_msg)
-
- for filename in files:
- os.unlink(filename)
- logger.verbose("Removed %s", filename)
- logger.info("Files removed: %s", len(files))
-
- def purge_cache(self, options: Values, args: List[Any]) -> None:
- if args:
- raise CommandError("Too many arguments")
-
- return self.remove_cache_items(options, ["*"])
-
- def _cache_dir(self, options: Values, subdir: str) -> str:
- return os.path.join(options.cache_dir, subdir)
-
- def _find_http_files(self, options: Values) -> List[str]:
- http_dir = self._cache_dir(options, "http")
- return filesystem.find_files(http_dir, "*")
-
- def _find_wheels(self, options: Values, pattern: str) -> List[str]:
- wheel_dir = self._cache_dir(options, "wheels")
-
- # The wheel filename format, as specified in PEP 427, is:
- # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
- #
- # Additionally, non-alphanumeric values in the distribution are
- # normalized to underscores (_), meaning hyphens can never occur
- # before `-{version}`.
- #
- # Given that information:
- # - If the pattern we're given contains a hyphen (-), the user is
- # providing at least the version. Thus, we can just append `*.whl`
- # to match the rest of it.
- # - If the pattern we're given doesn't contain a hyphen (-), the
- # user is only providing the name. Thus, we append `-*.whl` to
- # match the hyphen before the version, followed by anything else.
- #
- # PEP 427: https://www.python.org/dev/peps/pep-0427/
- pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
-
- return filesystem.find_files(wheel_dir, pattern)
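The _find_wheels helper above turns a bare project name or a name-plus-version into a glob, following the PEP 427 reasoning in its comment. A minimal standalone sketch of that rule, using only the standard library (the wheel_glob name and the sample filenames are illustrative, not taken from pip):

    import fnmatch

    def wheel_glob(pattern: str) -> str:
        # A hyphen means the user already supplied at least the version, so
        # "*.whl" completes the filename; otherwise "-*.whl" matches the hyphen
        # before the version plus whatever follows it.
        return pattern + ("*.whl" if "-" in pattern else "-*.whl")

    cached = ["requests-2.28.1-py3-none-any.whl", "idna-3.4-py3-none-any.whl"]
    print(fnmatch.filter(cached, wheel_glob("requests")))         # ['requests-2.28.1-py3-none-any.whl']
    print(fnmatch.filter(cached, wheel_glob("requests-2.28.1")))  # ['requests-2.28.1-py3-none-any.whl']

Because distribution names are normalized so hyphens only appear before the version, a name-only pattern such as "requests" cannot accidentally match a different project whose name merely starts with "requests".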
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py
deleted file mode 100644
index 2e3c80869d9c1a70ee003d054a53f49c3f53a556..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
- pygments.unistring
- ~~~~~~~~~~~~~~~~~~
-
- Strings of all Unicode characters of a certain category.
- Used for matching in Unicode-aware languages. Run to regenerate.
-
- Inspired by chartypes_create.py from the MoinMoin project.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
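The category constants defined below are regular-expression character-class bodies: wrapping one in "[...]" matches a single character of that Unicode category, which is how Pygments lexers consume them. A minimal usage sketch, assuming the module is importable from pip's vendored tree (a standalone Pygments install exposes the same names as pygments.unistring):

    import re
    from pip._vendor.pygments import unistring as uni

    # One Unicode uppercase letter (category Lu).
    uppercase = re.compile("[%s]" % uni.Lu)
    # An identifier-like token: one XID_Start character followed by XID_Continue characters.
    ident = re.compile("[%s][%s]*" % (uni.xid_start, uni.xid_continue))

    print(bool(uppercase.match("Ångström")))  # True: 'Å' falls in Lu
    print(bool(ident.match("变量_1")))         # True: CJK start, underscore and digit continue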
-Cc = '\x00-\x1f\x7f-\x9f'
-
-Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f'
-
-Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\u
fefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U0001189f\U000118f3-\U000118fe\U00011900-\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143ff\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U00
01d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff'
-
-Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd'
-
-Cs = '\ud800-\udbff\\\udc00\udc01-\udfff'
-
-Ll = 'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\
ua685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943'
-
-Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1'
-
-Lo = '\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1878\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00
010080-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
-
-Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc'
-
-Lu = 'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\u
a724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d3\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921'
-
-Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\U00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172'
-
-Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672'
-
-Mn = '\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f
-\U00011d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef'
-
-Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959'
-
-Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e'
-
-No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c'
-
-Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f'
-
-Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d'
-
-Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
-
-Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
-
-Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
-
-Po = "!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\U00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f"
-
-Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
-
-Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0'
-
-Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff'
-
-Sm = '+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1'
-
-So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f265\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d'
-
-Zl = '\u2028'
-
-Zp = '\u2029'
-
-Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000'
-
-xid_continue = '0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1
fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U000113
44\U00011347-\U00011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef'
-
-xid_start = 'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801
\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001
b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
-
-cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
-
-# Generated from unidata 11.0.0
-
-def combine(*args):
- return ''.join(globals()[cat] for cat in args)
-
-
-def allexcept(*args):
- newcats = cats[:]
- for arg in args:
- newcats.remove(arg)
- return ''.join(globals()[cat] for cat in newcats)
-
-
-def _handle_runs(char_list): # pragma: no cover
- buf = []
- for c in char_list:
- if len(c) == 1:
- if buf and buf[-1][1] == chr(ord(c)-1):
- buf[-1] = (buf[-1][0], c)
- else:
- buf.append((c, c))
- else:
- buf.append((c, c))
- for a, b in buf:
- if a == b:
- yield a
- else:
- yield '%s-%s' % (a, b)
-
-
-if __name__ == '__main__': # pragma: no cover
- import unicodedata
-
- categories = {'xid_start': [], 'xid_continue': []}
-
- with open(__file__) as fp:
- content = fp.read()
-
- header = content[:content.find('Cc =')]
- footer = content[content.find("def combine("):]
-
- for code in range(0x110000):
- c = chr(code)
- cat = unicodedata.category(c)
- if ord(c) == 0xdc00:
-            # Hack to avoid this low surrogate being combined with the preceding
-            # high surrogate, 0xdbff, when doing a repr.
- c = '\\' + c
- elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
- # Escape regex metachars.
- c = '\\' + c
- categories.setdefault(cat, []).append(c)
- # XID_START and XID_CONTINUE are special categories used for matching
- # identifiers in Python 3.
- if c.isidentifier():
- categories['xid_start'].append(c)
- if ('a' + c).isidentifier():
- categories['xid_continue'].append(c)
-
- with open(__file__, 'w') as fp:
- fp.write(header)
-
- for cat in sorted(categories):
- val = ''.join(_handle_runs(categories[cat]))
- fp.write('%s = %a\n\n' % (cat, val))
-
- cats = sorted(categories)
- cats.remove('xid_start')
- cats.remove('xid_continue')
- fp.write('cats = %r\n\n' % cats)
-
- fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
-
- fp.write(footer)
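The `__main__` block above regenerates this module: it walks every Unicode codepoint, buckets it by `unicodedata.category()`, and `_handle_runs` collapses consecutive codepoints into `a-b` ranges so each category becomes a compact regex character class. Below is a simplified, standalone sketch of that run-compression step; it omits the escaped-character special case and the generator interface of `_handle_runs`, and the input list is made up for illustration.

```python
# Minimal sketch of the run compression performed by _handle_runs above.
def compress_runs(chars):
    runs = []
    for c in chars:
        if runs and ord(runs[-1][1]) + 1 == ord(c):
            runs[-1] = (runs[-1][0], c)      # extend the current run
        else:
            runs.append((c, c))              # start a new run
    return ''.join(a if a == b else f'{a}-{b}' for a, b in runs)

# 'A'..'Z' plus a lone 'a' collapses to the character-class fragment 'A-Za'.
print(compress_runs([chr(cp) for cp in range(ord('A'), ord('Z') + 1)] + ['a']))
```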
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__init__.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__init__.py
deleted file mode 100644
index 34e3a9950cc557879af8d797f9382b18a870fb56..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Read resources contained within a package."""
-
-from ._common import (
- as_file,
- files,
- Package,
-)
-
-from ._legacy import (
- contents,
- open_binary,
- read_binary,
- open_text,
- read_text,
- is_resource,
- path,
- Resource,
-)
-
-from .abc import ResourceReader
-
-
-__all__ = [
- 'Package',
- 'Resource',
- 'ResourceReader',
- 'as_file',
- 'contents',
- 'files',
- 'is_resource',
- 'open_binary',
- 'open_text',
- 'path',
- 'read_binary',
- 'read_text',
-]
diff --git a/spaces/Reself/StableVideo/annotator/canny/__init__.py b/spaces/Reself/StableVideo/annotator/canny/__init__.py
deleted file mode 100644
index cb0da951dc838ec9dec2131007e036113281800b..0000000000000000000000000000000000000000
--- a/spaces/Reself/StableVideo/annotator/canny/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import cv2
-
-
-class CannyDetector:
- def __call__(self, img, low_threshold, high_threshold):
- return cv2.Canny(img, low_threshold, high_threshold)
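For context, the wrapper above is just a thin callable around OpenCV's Canny edge detector. A quick usage sketch follows; the file names and thresholds are illustrative, and the import path assumes the Space's root directory is on `sys.path`.

```python
import cv2

from annotator.canny import CannyDetector  # path as laid out in the Space above

apply_canny = CannyDetector()
img = cv2.imread("example.jpg")            # hypothetical 8-bit input image
edges = apply_canny(img, low_threshold=100, high_threshold=200)
cv2.imwrite("edges.png", edges)            # single-channel edge map
```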
diff --git a/spaces/Rifd/Face-Real-ESRGAN/README.md b/spaces/Rifd/Face-Real-ESRGAN/README.md
deleted file mode 100644
index 9eef59cb5b000ef8f9d721d0b3b81f2501c2ada5..0000000000000000000000000000000000000000
--- a/spaces/Rifd/Face-Real-ESRGAN/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Face Real ESRGAN 2x 4x 8x
-emoji: 😻
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.20.1
-app_file: app.py
-pinned: true
-license: apache-2.0
-duplicated_from: doevent/Face-Real-ESRGAN
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/registry.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/registry.py
deleted file mode 100644
index 39eabc58db4b5954478a2ac1ab91cea5e45ab055..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/registry.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from annotator.uniformer.mmcv.utils import Registry
-
-CONV_LAYERS = Registry('conv layer')
-NORM_LAYERS = Registry('norm layer')
-ACTIVATION_LAYERS = Registry('activation layer')
-PADDING_LAYERS = Registry('padding layer')
-UPSAMPLE_LAYERS = Registry('upsample layer')
-PLUGIN_LAYERS = Registry('plugin layer')
-
-DROPOUT_LAYERS = Registry('drop out layers')
-POSITIONAL_ENCODING = Registry('position encoding')
-ATTENTION = Registry('attention')
-FEEDFORWARD_NETWORK = Registry('feed-forward Network')
-TRANSFORMER_LAYER = Registry('transformerLayer')
-TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence')
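These `Registry` objects are lookup tables that map a string name to a layer class, which is what lets config dicts refer to building blocks by name. The sketch below shows how a custom layer could be registered and looked up, assuming the vendored `Registry` behaves like upstream mmcv's; the `MySwish` class is a made-up example.

```python
import torch
import torch.nn as nn

# Import path follows the Space layout above; adjust if the package root differs.
from annotator.uniformer.mmcv.cnn.bricks.registry import ACTIVATION_LAYERS


@ACTIVATION_LAYERS.register_module()   # registered under its class name, "MySwish"
class MySwish(nn.Module):
    def forward(self, x):
        return x * torch.sigmoid(x)


# A config entry such as dict(type='MySwish') can later be resolved back to the class.
act_cls = ACTIVATION_LAYERS.get('MySwish')
act = act_cls()
```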
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/plugin.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/plugin.py
deleted file mode 100644
index 07c010d4053174dd41107aa654ea67e82b46a25c..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/cnn/bricks/plugin.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import inspect
-import platform
-
-from .registry import PLUGIN_LAYERS
-
-if platform.system() == 'Windows':
- import regex as re
-else:
- import re
-
-
-def infer_abbr(class_type):
- """Infer abbreviation from the class name.
-
- This method will infer the abbreviation to map class types to
- abbreviations.
-
- Rule 1: If the class has the property "abbr", return the property.
-    Rule 2: Otherwise, the abbreviation falls back to the snake case of the
-    class name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.
-
- Args:
-        class_type (type): The plugin layer class.
-
- Returns:
- str: The inferred abbreviation.
- """
-
-    def camel2snake(word):
-        """Convert a camelCase word into snake_case.
-
-        Modified from the `inflection` library.
-
-        Example::
-
-            >>> camel2snake("FancyBlock")
- 'fancy_block'
- """
-
- word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
- word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
- word = word.replace('-', '_')
- return word.lower()
-
- if not inspect.isclass(class_type):
- raise TypeError(
- f'class_type must be a type, but got {type(class_type)}')
- if hasattr(class_type, '_abbr_'):
- return class_type._abbr_
- else:
-        return camel2snake(class_type.__name__)
-
-
-def build_plugin_layer(cfg, postfix='', **kwargs):
- """Build plugin layer.
-
- Args:
- cfg (None or dict): cfg should contain:
- type (str): identify plugin layer type.
- layer args: args needed to instantiate a plugin layer.
- postfix (int, str): appended into norm abbreviation to
- create named layer. Default: ''.
-
- Returns:
- tuple[str, nn.Module]:
- name (str): abbreviation + postfix
- layer (nn.Module): created plugin layer
- """
- if not isinstance(cfg, dict):
- raise TypeError('cfg must be a dict')
- if 'type' not in cfg:
- raise KeyError('the cfg dict must contain the key "type"')
- cfg_ = cfg.copy()
-
- layer_type = cfg_.pop('type')
- if layer_type not in PLUGIN_LAYERS:
- raise KeyError(f'Unrecognized plugin type {layer_type}')
-
- plugin_layer = PLUGIN_LAYERS.get(layer_type)
- abbr = infer_abbr(plugin_layer)
-
- assert isinstance(postfix, (int, str))
- name = abbr + str(postfix)
-
- layer = plugin_layer(**kwargs, **cfg_)
-
- return name, layer
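Putting `infer_abbr` and `build_plugin_layer` together: a plugin class registered in `PLUGIN_LAYERS` can be instantiated from a plain config dict, and the returned name is the inferred abbreviation plus the postfix. A hedged sketch under the same assumptions as the registry example above; the `FancyBlock` class and its arguments are illustrative.

```python
import torch.nn as nn

from annotator.uniformer_base.mmcv.cnn.bricks.plugin import build_plugin_layer
from annotator.uniformer_base.mmcv.cnn.bricks.registry import PLUGIN_LAYERS


@PLUGIN_LAYERS.register_module()
class FancyBlock(nn.Module):          # no _abbr_ attribute, so infer_abbr() -> "fancy_block"
    def __init__(self, channels):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, 3, padding=1)

    def forward(self, x):
        return self.conv(x)


name, layer = build_plugin_layer(dict(type='FancyBlock', channels=64), postfix=1)
print(name)                           # "fancy_block1"
```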
diff --git a/spaces/RohithMidigudla/Comment_Toxicity_Detection/app.py b/spaces/RohithMidigudla/Comment_Toxicity_Detection/app.py
deleted file mode 100644
index cff902c8bf111e5d62dbac09f1a1f96f9792443c..0000000000000000000000000000000000000000
--- a/spaces/RohithMidigudla/Comment_Toxicity_Detection/app.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import pandas as pd
-import numpy as np
-import gradio as gr
-
-import tensorflow as tf
-from tensorflow.keras.layers import TextVectorization
-
-df = pd.read_csv('https://raw.githubusercontent.com/whitehatjr1001/comment-toxicity-detecttion-/main/jigsaw-toxic-comment-classification-challenge/train.csv/train.csv')
-
-X = df['comment_text']
-y = df[df.columns[2:]].values
-
-max_features = 2000000
-
-vectorizer = TextVectorization(max_tokens=max_features, output_sequence_length=1800, output_mode='int')
-
-vectorizer.adapt(X.values)
-
-model_path = 'commenttoxicity (1).h5'
-
-model = tf.keras.models.load_model(model_path)
-
-
-def score_comment(comment):
-    vectorized_comment = vectorizer([comment])
- results = model.predict(vectorized_comment)
-
- text = ''
- for idx, col in enumerate(df.columns[2:]):
- text += '{}: {}\n'.format(col, results[0][idx]>0.5)
-
- return text
-
-interface = gr.Interface(fn=score_comment,
- inputs=gr.Textbox(lines=2, placeholder='Comment to score'),
- outputs='text')
-
-interface.launch(share=True)
diff --git a/spaces/Sanathkumar1603/hackathon/app/templates/expr_recognition.html b/spaces/Sanathkumar1603/hackathon/app/templates/expr_recognition.html
deleted file mode 100644
index e89ac46299f9efb9a4d781291362cd37c5c2def0..0000000000000000000000000000000000000000
--- a/spaces/Sanathkumar1603/hackathon/app/templates/expr_recognition.html
+++ /dev/null
@@ -1,32 +0,0 @@
-[expr_recognition.html: HTML markup was stripped during extraction; recoverable text is the page title "Index" and the heading "Expression Recognition"]
diff --git a/spaces/ServerX/PorcoDiaz/infer/lib/train/mel_processing.py b/spaces/ServerX/PorcoDiaz/infer/lib/train/mel_processing.py
deleted file mode 100644
index f458775bf62b79f791b419ca7ed62c550ae252d5..0000000000000000000000000000000000000000
--- a/spaces/ServerX/PorcoDiaz/infer/lib/train/mel_processing.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-import logging
-
-logger = logging.getLogger(__name__)
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- return dynamic_range_compression_torch(magnitudes)
-
-
-def spectral_de_normalize_torch(magnitudes):
- return dynamic_range_decompression_torch(magnitudes)
-
-
-# Reusable banks
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- """Convert waveform into Linear-frequency Linear-amplitude spectrogram.
-
- Args:
- y :: (B, T) - Audio waveforms
- n_fft
- sampling_rate
- hop_size
- win_size
- center
- Returns:
- :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram
- """
- # Validation
- if torch.min(y) < -1.07:
- logger.debug("min value is %s", str(torch.min(y)))
- if torch.max(y) > 1.07:
- logger.debug("max value is %s", str(torch.max(y)))
-
- # Window - Cache if needed
- global hann_window
- dtype_device = str(y.dtype) + "_" + str(y.device)
- wnsize_dtype_device = str(win_size) + "_" + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(
- dtype=y.dtype, device=y.device
- )
-
- # Padding
- y = torch.nn.functional.pad(
- y.unsqueeze(1),
- (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
- mode="reflect",
- )
- y = y.squeeze(1)
-
- # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2)
- spec = torch.stft(
- y,
- n_fft,
- hop_length=hop_size,
- win_length=win_size,
- window=hann_window[wnsize_dtype_device],
- center=center,
- pad_mode="reflect",
- normalized=False,
- onesided=True,
- return_complex=False,
- )
-
- # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame)
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- # MelBasis - Cache if needed
- global mel_basis
- dtype_device = str(spec.dtype) + "_" + str(spec.device)
- fmax_dtype_device = str(fmax) + "_" + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(
- sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax
- )
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(
- dtype=spec.dtype, device=spec.device
- )
-
- # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame)
- melspec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- melspec = spectral_normalize_torch(melspec)
- return melspec
-
-
-def mel_spectrogram_torch(
- y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False
-):
- """Convert waveform into Mel-frequency Log-amplitude spectrogram.
-
- Args:
- y :: (B, T) - Waveforms
- Returns:
- melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram
- """
- # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame)
- spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center)
-
- # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame)
- melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax)
-
- return melspec
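The three functions above form a small pipeline: waveform → linear-amplitude spectrogram → log-amplitude mel spectrogram, with Hann windows and mel filter banks cached per dtype/device. A usage sketch with illustrative parameters follows; the real values come from the training config, and the import path assumes the Space root is on `sys.path`.

```python
import torch

from infer.lib.train.mel_processing import (
    mel_spectrogram_torch,
    spec_to_mel_torch,
    spectrogram_torch,
)

# Illustrative settings, not the project's actual config values.
sr, n_fft, hop, win, n_mels, fmin, fmax = 40000, 2048, 400, 2048, 128, 0, None

y = torch.randn(1, sr) * 0.1                                     # (B=1, T=1 s) dummy waveform
spec = spectrogram_torch(y, n_fft, sr, hop, win, center=False)   # (1, n_fft // 2 + 1, frames)
mel = spec_to_mel_torch(spec, n_fft, n_mels, sr, fmin, fmax)     # (1, n_mels, frames)

# Equivalent one-shot call:
mel2 = mel_spectrogram_torch(y, n_fft, n_mels, sr, hop, win, fmin, fmax, center=False)
```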
diff --git a/spaces/Shocky/Pink-Anime/app.py b/spaces/Shocky/Pink-Anime/app.py
deleted file mode 100644
index da364b16cd36df598b187e6c202472b25092c251..0000000000000000000000000000000000000000
--- a/spaces/Shocky/Pink-Anime/app.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import os
-from subprocess import getoutput
-
-gpu_info = getoutput('nvidia-smi')
-if("A10G" in gpu_info):
- os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl")
-elif("T4" in gpu_info):
- os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl")
-
-os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui")
-os.chdir("/home/user/app/stable-diffusion-webui")
-
-os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py")
-os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''')
-os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py")
-os.system(f"sed -i -e 's/inputs=\[component\],/&\\n queue=False,/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e 's/outputs=\[token_counter\]/outputs=[token_counter], queue=False/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
-
-# ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header----------------------------
-#os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py")
-#os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
-# ---------------------------------------------------------------------------------------------------------------------------------------------------
-
-os.system(f"wget -q https://huggingface.co/Alsebay/PeachMixs/resolve/main/PeachTachyonMixs/PeachTachyon2.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PeachTachyon2.safetensors")
-os.system(f"wget -q https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/VAE/vae-ft-mse-840000-ema-pruned.ckpt")
-os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser")
-#Embeddings TEXTUAL INVERSION
-os.system(f"wget -q https://huggingface.co/ai-moroz/lazy-ti/resolve/main/dump/diona-gi.pt -O /home/user/app/stable-diffusion-webui/embeddings/diona-gi.pt")
-os.system(f"wget -q https://huggingface.co/datasets/gsdf/EasyNegative/resolve/main/EasyNegative.safetensors -O /home/user/app/stable-diffusion-webui/embeddings/EasyNegative.safetensors")
-
-if "IS_SHARED_UI" in os.environ:
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json")
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json")
- os.system(f"wget -q {os.getenv('EMBED_LINK')} -O /home/user/app/stable-diffusion-webui/embeddings/{os.getenv('EMBED_NAME')}")
- os.system(f"python launch.py --use-cpu all --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --skip-torch-cuda-test")
-else:
- # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py")
- #os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py")
-
- # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME")
- #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study")
- #os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser")
- #os.system(f"git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui")
-
- # Please duplicate this space and delete # character in front of the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt")
- #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt")
- #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt")
- #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt")
- #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt")
- #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt")
- #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt")
- os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/wd-1-4-anime_e2.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Waifudiffusion-1-4-anime_e2.ckpt")
- os.system(f"wget -q https://huggingface.co/gsdf/Counterfeit-V2.5/resolve/main/Counterfeit-V2.5_pruned.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Counterfeit-V2.5_pruned.safetensors")
- os.system(f"wget -q https://huggingface.co/andite/anything-v4.0/resolve/main/anything-v4.5-pruned.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-v4.5-pruned.safetensors")
- os.system(f"wget -q https://huggingface.co/Alsebay/PeachMixs/resolve/main/PeachUltimaMixs/PeachUltima2.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PeachUltima2.safetensors")
- os.system(f"wget -q https://huggingface.co/iZELX1/Grapefruit/resolve/main/grapefruitv4.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Grapefruitv4.safetensors")
- os.system(f"wget -q https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3_orangemixs.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/AbyssOrangeMix3.safetensors")
- #os.system(f"wget -q https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/AbyssOrangeMix3A1.safetensors")
- #os.system(f"wget -q https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A2.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/AbyssOrangeMix3A2.safetensors")
- #os.system(f"wget -q https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A3.safetensors -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/AbyssOrangeMix3A3.safetensors")
- os.system(f"wget -q {os.getenv('EMBED_LINK')} -O /home/user/app/stable-diffusion-webui/embeddings/{os.getenv('EMBED_NAME')}")
- #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt")
- #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt")
- #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt")
- #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt")
- #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt")
- #os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml")
- os.system(f"EXPOSE 7860")
- #os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser")
- #os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json")
- # os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json")
- os.system(f"python launch.py --precision full --no-half --use-cpu all --listen --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --skip-torch-cuda-test")
diff --git a/spaces/SteveDigital/free-fast-youtube-url-video-to-text-using-openai-whisper/README.md b/spaces/SteveDigital/free-fast-youtube-url-video-to-text-using-openai-whisper/README.md
deleted file mode 100644
index 9a82c2b72d1dc7f1380da039ec65feaddbf3df08..0000000000000000000000000000000000000000
--- a/spaces/SteveDigital/free-fast-youtube-url-video-to-text-using-openai-whisper/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Free Youtube URL Video-to-Text Using OpenAI Whisper
-emoji: 📚
-colorFrom: green
-colorTo: blue
-sdk: gradio
-sdk_version: 3.11.0
-app_file: app.py
-pinned: false
-license: gpl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/test_discriminators.py b/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/test_discriminators.py
deleted file mode 100644
index fad89a0ae4534dc7967b6ccda194b9fd1dedbffe..0000000000000000000000000000000000000000
--- a/spaces/SuYuanS/AudioCraft_Plus/tests/adversarial/test_discriminators.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import random
-
-import torch
-
-from audiocraft.adversarial.discriminators import (
- MultiPeriodDiscriminator,
- MultiScaleDiscriminator,
- MultiScaleSTFTDiscriminator
-)
-
-
-class TestMultiPeriodDiscriminator:
-
- def test_mpd_discriminator(self):
- N, C, T = 2, 2, random.randrange(1, 100_000)
- t0 = torch.randn(N, C, T)
- periods = [1, 2, 3]
- mpd = MultiPeriodDiscriminator(periods=periods, in_channels=C)
- logits, fmaps = mpd(t0)
-
- assert len(logits) == len(periods)
- assert len(fmaps) == len(periods)
- assert all([logit.shape[0] == N and len(logit.shape) == 4 for logit in logits])
- assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])
-
-
-class TestMultiScaleDiscriminator:
-
- def test_msd_discriminator(self):
- N, C, T = 2, 2, random.randrange(1, 100_000)
- t0 = torch.randn(N, C, T)
-
- scale_norms = ['weight_norm', 'weight_norm']
- msd = MultiScaleDiscriminator(scale_norms=scale_norms, in_channels=C)
- logits, fmaps = msd(t0)
-
- assert len(logits) == len(scale_norms)
- assert len(fmaps) == len(scale_norms)
- assert all([logit.shape[0] == N and len(logit.shape) == 3 for logit in logits])
- assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])
-
-
-class TestMultiScaleStftDiscriminator:
-
- def test_msstftd_discriminator(self):
- N, C, T = 2, 2, random.randrange(1, 100_000)
- t0 = torch.randn(N, C, T)
-
- n_filters = 4
- n_ffts = [128, 256, 64]
- hop_lengths = [32, 64, 16]
- win_lengths = [128, 256, 64]
-
- msstftd = MultiScaleSTFTDiscriminator(filters=n_filters, n_ffts=n_ffts, hop_lengths=hop_lengths,
- win_lengths=win_lengths, in_channels=C)
- logits, fmaps = msstftd(t0)
-
- assert len(logits) == len(n_ffts)
- assert len(fmaps) == len(n_ffts)
- assert all([logit.shape[0] == N and len(logit.shape) == 4 for logit in logits])
- assert all([feature.shape[0] == N for fmap in fmaps for feature in fmap])
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/conftest.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/conftest.py
deleted file mode 100644
index abf61314798797c2c44b002c89487fa2dfbfd9f9..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/conftest.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import builtins
-import inspect
-import os
-import pathlib
-import shutil
-import sys
-import types
-
-import pytest
-
-# Must register before it gets imported
-pytest.register_assert_rewrite("IPython.testing.tools")
-
-from .testing import tools
-
-
-def pytest_collection_modifyitems(items):
- """This function is automatically run by pytest passing all collected test
- functions.
-
- We use it to add asyncio marker to all async tests and assert we don't use
- test functions that are async generators which wouldn't make sense.
- """
- for item in items:
- if inspect.iscoroutinefunction(item.obj):
- item.add_marker("asyncio")
- assert not inspect.isasyncgenfunction(item.obj)
-
-
-def get_ipython():
- from .terminal.interactiveshell import TerminalInteractiveShell
- if TerminalInteractiveShell._instance:
- return TerminalInteractiveShell.instance()
-
- config = tools.default_config()
- config.TerminalInteractiveShell.simple_prompt = True
-
- # Create and initialize our test-friendly IPython instance.
- shell = TerminalInteractiveShell.instance(config=config)
- return shell
-
-
-@pytest.fixture(scope='session', autouse=True)
-def work_path():
- path = pathlib.Path("./tmp-ipython-pytest-profiledir")
- os.environ["IPYTHONDIR"] = str(path.absolute())
- if path.exists():
-        raise ValueError('IPython dir temporary path already exists! Did a previous test run exit successfully?')
- path.mkdir()
- yield
- shutil.rmtree(str(path.resolve()))
-
-
-def nopage(strng, start=0, screen_lines=0, pager_cmd=None):
- if isinstance(strng, dict):
- strng = strng.get("text/plain", "")
- print(strng)
-
-
-def xsys(self, cmd):
- """Replace the default system call with a capturing one for doctest.
- """
- # We use getoutput, but we need to strip it because pexpect captures
- # the trailing newline differently from commands.getoutput
- print(self.getoutput(cmd, split=False, depth=1).rstrip(), end="", file=sys.stdout)
- sys.stdout.flush()
-
-
-# For things to work correctly we would need this to be a session fixture; unfortunately
-# it would fail for some tests that get executed at _collection_ time (before the
-# fixture runs), in particular parametrized tests that contain yields. So for now,
-# execute it at import time.
-#@pytest.fixture(autouse=True, scope='session')
-def inject():
-
- builtins.get_ipython = get_ipython
- builtins._ip = get_ipython()
- builtins.ip = get_ipython()
- builtins.ip.system = types.MethodType(xsys, ip)
- builtins.ip.builtin_trap.activate()
- from .core import page
-
- page.pager_page = nopage
- # yield
-
-
-inject()
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/parallel/collate.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/parallel/collate.py
deleted file mode 100644
index ad749197df21b0d74297548be5f66a696adebf7f..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/parallel/collate.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from collections.abc import Mapping, Sequence
-
-import torch
-import torch.nn.functional as F
-from torch.utils.data.dataloader import default_collate
-
-from .data_container import DataContainer
-
-
-def collate(batch, samples_per_gpu=1):
- """Puts each data field into a tensor/DataContainer with outer dimension
- batch size.
-
- Extend default_collate to add support for
- :type:`~mmcv.parallel.DataContainer`. There are 3 cases.
-
- 1. cpu_only = True, e.g., meta data
- 2. cpu_only = False, stack = True, e.g., images tensors
- 3. cpu_only = False, stack = False, e.g., gt bboxes
- """
-
- if not isinstance(batch, Sequence):
- raise TypeError(f'{batch.dtype} is not supported.')
-
- if isinstance(batch[0], DataContainer):
- stacked = []
- if batch[0].cpu_only:
- for i in range(0, len(batch), samples_per_gpu):
- stacked.append(
- [sample.data for sample in batch[i:i + samples_per_gpu]])
- return DataContainer(
- stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
- elif batch[0].stack:
- for i in range(0, len(batch), samples_per_gpu):
- assert isinstance(batch[i].data, torch.Tensor)
-
- if batch[i].pad_dims is not None:
- ndim = batch[i].dim()
- assert ndim > batch[i].pad_dims
- max_shape = [0 for _ in range(batch[i].pad_dims)]
- for dim in range(1, batch[i].pad_dims + 1):
- max_shape[dim - 1] = batch[i].size(-dim)
- for sample in batch[i:i + samples_per_gpu]:
- for dim in range(0, ndim - batch[i].pad_dims):
- assert batch[i].size(dim) == sample.size(dim)
- for dim in range(1, batch[i].pad_dims + 1):
- max_shape[dim - 1] = max(max_shape[dim - 1],
- sample.size(-dim))
- padded_samples = []
- for sample in batch[i:i + samples_per_gpu]:
- pad = [0 for _ in range(batch[i].pad_dims * 2)]
- for dim in range(1, batch[i].pad_dims + 1):
- pad[2 * dim -
- 1] = max_shape[dim - 1] - sample.size(-dim)
- padded_samples.append(
- F.pad(
- sample.data, pad, value=sample.padding_value))
- stacked.append(default_collate(padded_samples))
- elif batch[i].pad_dims is None:
- stacked.append(
- default_collate([
- sample.data
- for sample in batch[i:i + samples_per_gpu]
- ]))
- else:
- raise ValueError(
- 'pad_dims should be either None or integers (1-3)')
-
- else:
- for i in range(0, len(batch), samples_per_gpu):
- stacked.append(
- [sample.data for sample in batch[i:i + samples_per_gpu]])
- return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
- elif isinstance(batch[0], Sequence):
- transposed = zip(*batch)
- return [collate(samples, samples_per_gpu) for samples in transposed]
- elif isinstance(batch[0], Mapping):
- return {
- key: collate([d[key] for d in batch], samples_per_gpu)
- for key in batch[0]
- }
- else:
- return default_collate(batch)
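In practice this collate function is what lets a DataLoader carry variable-sized images and arbitrary metadata in one batch: tensors wrapped in a `DataContainer` with `stack=True` are zero-padded up to the per-batch maximum over the last `pad_dims` dimensions, while `cpu_only=True` entries (e.g. meta dicts) are simply grouped into lists. A hedged sketch, assuming the vendored `DataContainer` matches upstream mmcv's constructor (`data, stack=False, padding_value=0, cpu_only=False, pad_dims=2`):

```python
import torch

from annotator.uniformer.mmcv.parallel.collate import collate
from annotator.uniformer.mmcv.parallel.data_container import DataContainer

# Two samples with different spatial sizes; stack=True + pad_dims=2 pads H and W.
batch = [
    {"img": DataContainer(torch.rand(3, 20, 24), stack=True, pad_dims=2),
     "meta": DataContainer({"filename": "a.jpg"}, cpu_only=True)},
    {"img": DataContainer(torch.rand(3, 18, 30), stack=True, pad_dims=2),
     "meta": DataContainer({"filename": "b.jpg"}, cpu_only=True)},
]

out = collate(batch, samples_per_gpu=2)
print(out["img"].data[0].shape)   # torch.Size([2, 3, 20, 30]) after zero padding
print(out["meta"].data[0])        # [{'filename': 'a.jpg'}, {'filename': 'b.jpg'}]
```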
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/network/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/network/__init__.py
deleted file mode 100644
index b51bde91b2e5b4e557ed9b70fc113843cc3d49ae..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/network/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-"""Contains purely network-related utilities.
-"""
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__version__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__version__.py
deleted file mode 100644
index 5063c3f8ee7980493efcc30c24f7e7582714aa81..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/requests/__version__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# .-. .-. .-. . . .-. .-. .-. .-.
-# |( |- |.| | | |- `-. | `-.
-# ' ' `-' `-`.`-' `-' `-' ' `-'
-
-__title__ = "requests"
-__description__ = "Python HTTP for Humans."
-__url__ = "https://requests.readthedocs.io"
-__version__ = "2.31.0"
-__build__ = 0x023100
-__author__ = "Kenneth Reitz"
-__author_email__ = "me@kennethreitz.org"
-__license__ = "Apache 2.0"
-__copyright__ = "Copyright Kenneth Reitz"
-__cake__ = "\u2728 \U0001f370 \u2728"
diff --git a/spaces/Team-PIXEL/PIXEL/README.md b/spaces/Team-PIXEL/PIXEL/README.md
deleted file mode 100644
index c4677329449ab77a02471d088a18b509e9eea6d7..0000000000000000000000000000000000000000
--- a/spaces/Team-PIXEL/PIXEL/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: PIXEL
-emoji: 🐱
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.1.1
-app_file: app.py
-pinned: true
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/build.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/build.py
deleted file mode 100644
index 34eb12d00d94ff905b796e75e2c4c5845257c8e9..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/build.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from detectron2.utils.registry import Registry
-
-PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR")
-PROPOSAL_GENERATOR_REGISTRY.__doc__ = """
-Registry for proposal generator, which produces object proposals from feature maps.
-
-The registered object will be called with `obj(cfg, input_shape)`.
-The call should return a `nn.Module` object.
-"""
-
-from . import rpn, rrpn # noqa F401 isort:skip
-
-
-def build_proposal_generator(cfg, input_shape):
- """
- Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.
- The name can be "PrecomputedProposals" to use no proposal generator.
- """
- name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
- if name == "PrecomputedProposals":
- return None
-
- return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape)
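The builder reads `cfg.MODEL.PROPOSAL_GENERATOR.NAME`, looks that name up in the registry, and calls the class with `(cfg, input_shape)`. Below is a hedged sketch of plugging in a custom generator, assuming `detectron2` (this vendored copy or an installed one) is importable; `DummyProposalGenerator` is a made-up stand-in, not a real component.

```python
import torch.nn as nn

from detectron2.config import get_cfg
from detectron2.modeling.proposal_generator.build import (
    PROPOSAL_GENERATOR_REGISTRY,
    build_proposal_generator,
)


@PROPOSAL_GENERATOR_REGISTRY.register()
class DummyProposalGenerator(nn.Module):
    """Illustrative no-op generator: returns no proposals and no losses."""

    def __init__(self, cfg, input_shape):
        super().__init__()

    def forward(self, images, features, gt_instances=None):
        return [], {}


cfg = get_cfg()
cfg.MODEL.PROPOSAL_GENERATOR.NAME = "DummyProposalGenerator"
generator = build_proposal_generator(cfg, input_shape=None)
print(type(generator).__name__)   # DummyProposalGenerator
```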
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/structures/test_rotated_boxes.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/structures/test_rotated_boxes.py
deleted file mode 100644
index 2781237427d74c92f082b0a563165174985daa41..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/structures/test_rotated_boxes.py
+++ /dev/null
@@ -1,437 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from __future__ import absolute_import, division, print_function, unicode_literals
-import logging
-import math
-import random
-import unittest
-import torch
-from fvcore.common.benchmark import benchmark
-
-from detectron2.layers.rotated_boxes import pairwise_iou_rotated
-from detectron2.structures.boxes import Boxes
-from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou
-from detectron2.utils.testing import reload_script_model
-
-logger = logging.getLogger(__name__)
-
-
-class TestRotatedBoxesLayer(unittest.TestCase):
- def test_iou_0_dim_cpu(self):
- boxes1 = torch.rand(0, 5, dtype=torch.float32)
- boxes2 = torch.rand(10, 5, dtype=torch.float32)
- expected_ious = torch.zeros(0, 10, dtype=torch.float32)
- ious = pairwise_iou_rotated(boxes1, boxes2)
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- boxes1 = torch.rand(10, 5, dtype=torch.float32)
- boxes2 = torch.rand(0, 5, dtype=torch.float32)
- expected_ious = torch.zeros(10, 0, dtype=torch.float32)
- ious = pairwise_iou_rotated(boxes1, boxes2)
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
- def test_iou_0_dim_cuda(self):
- boxes1 = torch.rand(0, 5, dtype=torch.float32)
- boxes2 = torch.rand(10, 5, dtype=torch.float32)
- expected_ious = torch.zeros(0, 10, dtype=torch.float32)
- ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
- self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
-
- boxes1 = torch.rand(10, 5, dtype=torch.float32)
- boxes2 = torch.rand(0, 5, dtype=torch.float32)
- expected_ious = torch.zeros(10, 0, dtype=torch.float32)
- ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
- self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
-
- def test_iou_half_overlap_cpu(self):
- boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32)
- boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32)
- expected_ious = torch.tensor([[0.5]], dtype=torch.float32)
- ious = pairwise_iou_rotated(boxes1, boxes2)
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
- def test_iou_half_overlap_cuda(self):
- boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32)
- boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32)
- expected_ious = torch.tensor([[0.5]], dtype=torch.float32)
- ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
- self.assertTrue(torch.allclose(ious_cuda.cpu(), expected_ious))
-
- def test_iou_precision(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- boxes1 = torch.tensor([[565, 565, 10, 10.0, 0]], dtype=torch.float32, device=device)
- boxes2 = torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32, device=device)
- iou = 8.3 / 10.0
- expected_ious = torch.tensor([[iou]], dtype=torch.float32)
- ious = pairwise_iou_rotated(boxes1, boxes2)
- self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
-
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
- def test_iou_too_many_boxes_cuda(self):
- s1, s2 = 5, 1289035
- boxes1 = torch.zeros(s1, 5)
- boxes2 = torch.zeros(s2, 5)
- ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda())
- self.assertTupleEqual(tuple(ious_cuda.shape), (s1, s2))
-
- def test_iou_extreme(self):
- # Cause floating point issues in cuda kernels (#1266)
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device)
- boxes2 = torch.tensor(
- [
- [
- -1.117407639806935e17,
- 1.3858420478349148e18,
- 1000.0000610351562,
- 1000.0000610351562,
- 1612.0,
- ]
- ],
- device=device,
- )
- ious = pairwise_iou_rotated(boxes1, boxes2)
- self.assertTrue(ious.min() >= 0, ious)
-
- def test_iou_issue_2154(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- boxes1 = torch.tensor(
- [
- [
- 296.6620178222656,
- 458.73883056640625,
- 23.515729904174805,
- 47.677001953125,
- 0.08795166015625,
- ]
- ],
- device=device,
- )
- boxes2 = torch.tensor(
- [[296.66201, 458.73882000000003, 23.51573, 47.67702, 0.087951]],
- device=device,
- )
- ious = pairwise_iou_rotated(boxes1, boxes2)
- expected_ious = torch.tensor([[1.0]], dtype=torch.float32)
- self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
-
- def test_iou_issue_2167(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- boxes1 = torch.tensor(
- [
- [
- 2563.74462890625000000000,
- 1436.79016113281250000000,
- 2174.70336914062500000000,
- 214.09500122070312500000,
- 115.11834716796875000000,
- ]
- ],
- device=device,
- )
- boxes2 = torch.tensor(
- [
- [
- 2563.74462890625000000000,
- 1436.79028320312500000000,
- 2174.70288085937500000000,
- 214.09495544433593750000,
- 115.11835479736328125000,
- ]
- ],
- device=device,
- )
- ious = pairwise_iou_rotated(boxes1, boxes2)
- expected_ious = torch.tensor([[1.0]], dtype=torch.float32)
- self.assertTrue(torch.allclose(ious.cpu(), expected_ious))
-
-
-class TestRotatedBoxesStructure(unittest.TestCase):
- def test_clip_area_0_degree(self):
- for _ in range(50):
- num_boxes = 100
- boxes_5d = torch.zeros(num_boxes, 5)
- boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
- boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
- boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
- boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
- # Convert from (x_ctr, y_ctr, w, h, 0) to (x1, y1, x2, y2)
- boxes_4d = torch.zeros(num_boxes, 4)
- boxes_4d[:, 0] = boxes_5d[:, 0] - boxes_5d[:, 2] / 2.0
- boxes_4d[:, 1] = boxes_5d[:, 1] - boxes_5d[:, 3] / 2.0
- boxes_4d[:, 2] = boxes_5d[:, 0] + boxes_5d[:, 2] / 2.0
- boxes_4d[:, 3] = boxes_5d[:, 1] + boxes_5d[:, 3] / 2.0
-
- image_size = (500, 600)
- test_boxes_4d = Boxes(boxes_4d)
- test_boxes_5d = RotatedBoxes(boxes_5d)
- # Before clip
- areas_4d = test_boxes_4d.area()
- areas_5d = test_boxes_5d.area()
- self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5))
- # After clip
- test_boxes_4d.clip(image_size)
- test_boxes_5d.clip(image_size)
- areas_4d = test_boxes_4d.area()
- areas_5d = test_boxes_5d.area()
- self.assertTrue(torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5))
-
- def test_clip_area_arbitrary_angle(self):
- num_boxes = 100
- boxes_5d = torch.zeros(num_boxes, 5)
- boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
- boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
- boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
- boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
- boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
- clip_angle_threshold = random.uniform(0, 180)
-
- image_size = (500, 600)
- test_boxes_5d = RotatedBoxes(boxes_5d)
- # Before clip
- areas_before = test_boxes_5d.area()
- # After clip
- test_boxes_5d.clip(image_size, clip_angle_threshold)
- areas_diff = test_boxes_5d.area() - areas_before
-
- # the areas should only decrease after clipping
- self.assertTrue(torch.all(areas_diff <= 0))
- # whenever the box is clipped (thus the area shrinks),
- # the angle for the box must be within the clip_angle_threshold
- # Note that the clip function will normalize the angle range
- # to be within (-180, 180]
- self.assertTrue(
- torch.all(torch.abs(boxes_5d[:, 4][torch.where(areas_diff < 0)]) < clip_angle_threshold)
- )
-
- def test_normalize_angles(self):
- # torch.manual_seed(0)
- for _ in range(50):
- num_boxes = 100
- boxes_5d = torch.zeros(num_boxes, 5)
- boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
- boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500)
- boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500)
- boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500)
- boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
- rotated_boxes = RotatedBoxes(boxes_5d)
- normalized_boxes = rotated_boxes.clone()
- normalized_boxes.normalize_angles()
- self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] >= -180))
- self.assertTrue(torch.all(normalized_boxes.tensor[:, 4] < 180))
- # x, y, w, h should not change
- self.assertTrue(torch.allclose(boxes_5d[:, :4], normalized_boxes.tensor[:, :4]))
- # the cos/sin values of the angles should stay the same
-
- self.assertTrue(
- torch.allclose(
- torch.cos(boxes_5d[:, 4] * math.pi / 180),
- torch.cos(normalized_boxes.tensor[:, 4] * math.pi / 180),
- atol=1e-5,
- )
- )
-
- self.assertTrue(
- torch.allclose(
- torch.sin(boxes_5d[:, 4] * math.pi / 180),
- torch.sin(normalized_boxes.tensor[:, 4] * math.pi / 180),
- atol=1e-5,
- )
- )
-
- def test_pairwise_iou_0_degree(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- boxes1 = torch.tensor(
- [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]],
- dtype=torch.float32,
- device=device,
- )
- boxes2 = torch.tensor(
- [
- [0.5, 0.5, 1.0, 1.0, 0.0],
- [0.25, 0.5, 0.5, 1.0, 0.0],
- [0.5, 0.25, 1.0, 0.5, 0.0],
- [0.25, 0.25, 0.5, 0.5, 0.0],
- [0.75, 0.75, 0.5, 0.5, 0.0],
- [1.0, 1.0, 1.0, 1.0, 0.0],
- ],
- dtype=torch.float32,
- device=device,
- )
- expected_ious = torch.tensor(
- [
- [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
- [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)],
- ],
- dtype=torch.float32,
- device=device,
- )
- ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- def test_pairwise_iou_45_degrees(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- boxes1 = torch.tensor(
- [
- [1, 1, math.sqrt(2), math.sqrt(2), 45],
- [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45],
- ],
- dtype=torch.float32,
- device=device,
- )
- boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device)
- expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device)
- ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- def test_pairwise_iou_orthogonal(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device)
- boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device)
- iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0)
- expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
- ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- def test_pairwise_iou_large_close_boxes(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- boxes1 = torch.tensor(
- [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]],
- dtype=torch.float32,
- device=device,
- )
- boxes2 = torch.tensor(
- [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]],
- dtype=torch.float32,
- device=device,
- )
- iou = 364.259155 / 364.259186
- expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
- ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- def test_pairwise_iou_many_boxes(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- num_boxes1 = 100
- num_boxes2 = 200
- boxes1 = torch.stack(
- [
- torch.tensor(
- [5 + 20 * i, 5 + 20 * i, 10, 10, 0],
- dtype=torch.float32,
- device=device,
- )
- for i in range(num_boxes1)
- ]
- )
- boxes2 = torch.stack(
- [
- torch.tensor(
- [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0],
- dtype=torch.float32,
- device=device,
- )
- for i in range(num_boxes2)
- ]
- )
- expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device)
- for i in range(min(num_boxes1, num_boxes2)):
- expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0
- ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- def test_pairwise_iou_issue1207_simplified(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- # Simplified test case of D2-issue-1207
- boxes1 = torch.tensor([[3, 3, 8, 2, -45.0]], device=device)
- boxes2 = torch.tensor([[6, 0, 8, 2, -45.0]], device=device)
- iou = 0.0
- expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
-
- ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- def test_pairwise_iou_issue1207(self):
- for device in ["cpu"] + (["cuda"] if torch.cuda.is_available() else []):
- # The original test case in D2-issue-1207
- boxes1 = torch.tensor([[160.0, 153.0, 230.0, 23.0, -37.0]], device=device)
- boxes2 = torch.tensor([[190.0, 127.0, 80.0, 21.0, -46.0]], device=device)
-
- iou = 0.0
- expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device)
-
- ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2))
- self.assertTrue(torch.allclose(ious, expected_ious))
-
- def test_empty_cat(self):
- x = RotatedBoxes.cat([])
-        # assertTrue() ignores its second argument, so compare the shape explicitly.
-        self.assertEqual(tuple(x.tensor.shape), (0, 5))
-
- def test_scriptability(self):
- def func(x):
- boxes = RotatedBoxes(x)
- test = boxes.to(torch.device("cpu")).tensor
- return boxes.area(), test
-
- f = torch.jit.script(func)
- f = reload_script_model(f)
- f(torch.rand((3, 5)))
-
- data = torch.rand((3, 5))
-
- def func_cat(x: torch.Tensor):
- boxes1 = RotatedBoxes(x)
- boxes2 = RotatedBoxes(x)
- # this is not supported by torchscript for now.
- # boxes3 = RotatedBoxes.cat([boxes1, boxes2])
- boxes3 = boxes1.cat([boxes1, boxes2])
- return boxes3
-
- f = torch.jit.script(func_cat)
- script_box = f(data)
- self.assertTrue(torch.equal(torch.cat([data, data]), script_box.tensor))
-
-
-def benchmark_rotated_iou():
- num_boxes1 = 200
- num_boxes2 = 500
- boxes1 = torch.stack(
- [
- torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32)
- for i in range(num_boxes1)
- ]
- )
- boxes2 = torch.stack(
- [
- torch.tensor(
- [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0],
- dtype=torch.float32,
- )
- for i in range(num_boxes2)
- ]
- )
-
- def func(dev, n=1):
- b1 = boxes1.to(device=dev)
- b2 = boxes2.to(device=dev)
-
- def bench():
- for _ in range(n):
- pairwise_iou_rotated(b1, b2)
- if dev.type == "cuda":
- torch.cuda.synchronize()
-
- return bench
-
- # only run it once per timed loop, since it's slow
- args = [{"dev": torch.device("cpu"), "n": 1}]
- if torch.cuda.is_available():
- args.append({"dev": torch.device("cuda"), "n": 10})
-
- benchmark(func, "rotated_iou", args, warmup_iters=3)
-
-
-if __name__ == "__main__":
- unittest.main(exit=False)
- benchmark_rotated_iou()
diff --git a/spaces/Theivaprakasham/wildreceipt/README.md b/spaces/Theivaprakasham/wildreceipt/README.md
deleted file mode 100644
index 281f882ccfb88020e0f7611a9e90761f3a1c39cf..0000000000000000000000000000000000000000
--- a/spaces/Theivaprakasham/wildreceipt/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Wildreceipt
-emoji: 🚀
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.0.15
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Ukrania/RVC-Models/lib/infer_pack/modules.py b/spaces/Ukrania/RVC-Models/lib/infer_pack/modules.py
deleted file mode 100644
index c83289df7c79a4810dacd15c050148544ba0b6a9..0000000000000000000000000000000000000000
--- a/spaces/Ukrania/RVC-Models/lib/infer_pack/modules.py
+++ /dev/null
@@ -1,522 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-from lib.infer_pack import commons
-from lib.infer_pack.commons import init_weights, get_padding
-from lib.infer_pack.transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(
- self,
- in_channels,
- hidden_channels,
- out_channels,
- kernel_size,
- n_layers,
- p_dropout,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
- assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(
- nn.Conv1d(
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
- for _ in range(n_layers - 1):
- self.conv_layers.append(
- nn.Conv1d(
- hidden_channels,
- hidden_channels,
- kernel_size,
- padding=kernel_size // 2,
- )
- )
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
- Dilated and Depth-Separable Convolution
- """
-
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size**i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(
- nn.Conv1d(
- channels,
- channels,
- kernel_size,
- groups=channels,
- dilation=dilation,
- padding=padding,
- )
- )
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
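- # WaveNet-style stack of gated dilated convolutions with 1x1 residual/skip projections and optional global conditioning through gin_channels.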
- def __init__(
- self,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- p_dropout=0,
- ):
- super(WN, self).__init__()
- assert kernel_size % 2 == 1
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(
- gin_channels, 2 * hidden_channels * n_layers, 1
- )
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
-
- for i in range(n_layers):
- dilation = dilation_rate**i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(
- hidden_channels,
- 2 * hidden_channels,
- kernel_size,
- dilation=dilation,
- padding=padding,
- )
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
- else:
- g_l = torch.zeros_like(x_in)
-
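- # Gated activation unit: the tanh and sigmoid halves of (x_in + g_l) are multiplied together, as in WaveNet.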
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:, self.hidden_channels :, :]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2]),
- )
- ),
- ]
- )
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=1,
- padding=get_padding(kernel_size, 1),
- )
- ),
- ]
- )
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList(
- [
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]),
- )
- ),
- weight_norm(
- Conv1d(
- channels,
- channels,
- kernel_size,
- 1,
- dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]),
- )
- ),
- ]
- )
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels, 1))
- self.logs = nn.Parameter(torch.zeros(channels, 1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1, 2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False,
- ):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=p_dropout,
- gin_channels=gin_channels,
- )
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
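- # Affine coupling layer: x0 passes through unchanged and predicts (m, logs) for an elementwise affine transform of x1; the log-determinant is sum(logs).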
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1, 2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class ConvFlow(nn.Module):
- def __init__(
- self,
- in_channels,
- filter_channels,
- kernel_size,
- n_layers,
- num_bins=10,
- tail_bound=5.0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
- self.proj = nn.Conv1d(
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
- )
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
-
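- # Split the projection into rational-quadratic spline parameters: num_bins widths, num_bins heights and num_bins - 1 knot derivatives per channel.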
- unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
- self.filter_channels
- )
- unnormalized_derivatives = h[..., 2 * self.num_bins :]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(
- x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails="linear",
- tail_bound=self.tail_bound,
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
- if not reverse:
- return x, logdet
- else:
- return x
diff --git a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/helpers/theb.py b/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/helpers/theb.py
deleted file mode 100644
index 71cfd23ff34768092e4dbe3ff6b719a946dceebb..0000000000000000000000000000000000000000
--- a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/helpers/theb.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import json
-import sys
-from re import findall
-from curl_cffi import requests
-
-config = json.loads(sys.argv[1])
-prompt = config['messages'][-1]['content']
-
-headers = {
- 'authority': 'chatbot.theb.ai',
- 'accept': 'application/json, text/plain, */*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'content-type': 'application/json',
- 'origin': 'https://chatbot.theb.ai',
- 'referer': 'https://chatbot.theb.ai/',
- 'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
-}
-
-json_data = {
- 'prompt': prompt,
- 'options': {}
-}
-
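-# Called by curl_cffi for each streamed chunk; extracts the incremental "content" field from the payload and prints it.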
-def format(chunk):
- try:
- completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
- print(completion_chunk, flush=True, end='')
-
- except Exception as e:
- print(f'[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]', flush=True)
- return
-
-while True:
- try:
- response = requests.post('https://chatbot.theb.ai/api/chat-process',
- headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
-
- exit(0)
-
- except Exception as e:
- print('[ERROR] an error occurred, retrying... |', e, flush=True)
- continue
\ No newline at end of file
diff --git a/spaces/Vorkrath/CarperAI-diff-codegen-6b-v2/README.md b/spaces/Vorkrath/CarperAI-diff-codegen-6b-v2/README.md
deleted file mode 100644
index c4fdd2a496bc744dba82c6f9e9f13ab790f4379b..0000000000000000000000000000000000000000
--- a/spaces/Vorkrath/CarperAI-diff-codegen-6b-v2/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: CarperAI Diff Codegen 6b V2
-emoji: 🔥
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Wanlau/sovits-4.0_datealive/modules/mel_processing.py b/spaces/Wanlau/sovits-4.0_datealive/modules/mel_processing.py
deleted file mode 100644
index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000
--- a/spaces/Wanlau/sovits-4.0_datealive/modules/mel_processing.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import math
-import os
-import random
-import torch
-from torch import nn
-import torch.nn.functional as F
-import torch.utils.data
-import numpy as np
-import librosa
-import librosa.util as librosa_util
-from librosa.util import normalize, pad_center, tiny
-from scipy.signal import get_window
-from scipy.io.wavfile import read
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
- output = dynamic_range_compression_torch(magnitudes)
- return output
-
-
-def spectral_de_normalize_torch(magnitudes):
- output = dynamic_range_decompression_torch(magnitudes)
- return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
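- # Reflect-pad by (n_fft - hop_size) / 2 on each side so torch.stft with center=False produces one frame per hop_size samples.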
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
- return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
- global mel_basis
- dtype_device = str(spec.dtype) + '_' + str(spec.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
- return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
- if torch.min(y) < -1.:
- print('min value is ', torch.min(y))
- if torch.max(y) > 1.:
- print('max value is ', torch.max(y))
-
- global mel_basis, hann_window
- dtype_device = str(y.dtype) + '_' + str(y.device)
- fmax_dtype_device = str(fmax) + '_' + dtype_device
- wnsize_dtype_device = str(win_size) + '_' + dtype_device
- if fmax_dtype_device not in mel_basis:
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
- mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
- if wnsize_dtype_device not in hann_window:
- hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
- y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
- y = y.squeeze(1)
-
- spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
- spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
- spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
- spec = spectral_normalize_torch(spec)
-
- return spec
diff --git a/spaces/WhisperAI/WhisperAIWeb/Dockerfile b/spaces/WhisperAI/WhisperAIWeb/Dockerfile
deleted file mode 100644
index 9fd6a524574d302ef5a1531a767339922a0c3521..0000000000000000000000000000000000000000
--- a/spaces/WhisperAI/WhisperAIWeb/Dockerfile
+++ /dev/null
@@ -1,20 +0,0 @@
-FROM python:3.7
-
-RUN pip install virtualenv
-ENV VIRTUAL_ENV=/venv
-RUN virtualenv venv -p python3
-ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-
-WORKDIR /app
-ADD . /app
-
-# Install dependencies
-RUN pip install -r requirements.txt
-RUN apt-get update
-RUN apt-get install -y ffmpeg
-
-# Expose port
-ENV PORT 8501
-
-# Run the application:
-CMD ["streamlit","run","app.py"]
\ No newline at end of file
diff --git a/spaces/YONG627/456123/yolov5-code-main/app.py b/spaces/YONG627/456123/yolov5-code-main/app.py
deleted file mode 100644
index 9394e9b5b587efc0606bce99212472a040828728..0000000000000000000000000000000000000000
--- a/spaces/YONG627/456123/yolov5-code-main/app.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import torch
-import gradio as gr
-
-model = torch.hub.load("./", "custom", path="yolov5s.pt", source="local")
-
-title = "✨基于YOLOV5的目标检测平台✨"
-
-desc = "(●'◡'●)"
-base_conf, base_iou = 0.15, 0.5
-
-
-def det_image(img, conf_最小置信度, iou_最大置信度):
- model.conf = conf_最小置信度
- model.iou = iou_最大置信度
-
- return model(img).render()[0]
-
-
-gr.Interface(inputs=["image", gr.Slider(minimum=0, maximum=1, value=base_conf), gr.Slider(minimum=0, maximum=1, value=base_iou)],
- outputs=["image"],
- fn=det_image,
- title=title,
- description=desc,
- article = "(●ˇ∀ˇ●)",
- live=True,
- examples=[["E:/BBX/Document/pytorch/息肉病变检测/YOLO5/yolov5-master/data/images/301.jpg", base_conf, base_iou], ["E:/BBX/Document/pytorch/息肉病变检测/YOLO5/yolov5-master/data/images/402.jpg", 0.3, base_iou], ["E:/BBX/Document/pytorch/息肉病变检测/YOLO5/yolov5-master/data/images/503.jpg", base_conf, base_iou]]).launch(auth=("admin", "1234"),share=True)
diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_ddim_flax.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_ddim_flax.py
deleted file mode 100644
index 157321d4681639c865e77745f9513b9a9a43b466..0000000000000000000000000000000000000000
--- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/schedulers/scheduling_ddim_flax.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# Copyright 2022 Stanford University Team and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
-# and https://github.com/hojonathanho/diffusion
-
-import math
-from dataclasses import dataclass
-from typing import Optional, Tuple, Union
-
-import flax
-import jax.numpy as jnp
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import deprecate
-from .scheduling_utils_flax import (
- _FLAX_COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS,
- FlaxSchedulerMixin,
- FlaxSchedulerOutput,
- broadcast_to_shape_from_left,
-)
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999) -> jnp.ndarray:
- """
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
- (1-beta) over time from t = [0,1].
-
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
- to that part of the diffusion process.
-
-
- Args:
- num_diffusion_timesteps (`int`): the number of betas to produce.
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
- prevent singularities.
-
- Returns:
- betas (`jnp.ndarray`): the betas used by the scheduler to step the model outputs
- """
-
- def alpha_bar(time_step):
- return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
-
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return jnp.array(betas, dtype=jnp.float32)
-
-
-@flax.struct.dataclass
-class DDIMSchedulerState:
- # setable values
- timesteps: jnp.ndarray
- alphas_cumprod: jnp.ndarray
- num_inference_steps: Optional[int] = None
-
- @classmethod
- def create(cls, num_train_timesteps: int, alphas_cumprod: jnp.ndarray):
- return cls(timesteps=jnp.arange(0, num_train_timesteps)[::-1], alphas_cumprod=alphas_cumprod)
-
-
-@dataclass
-class FlaxDDIMSchedulerOutput(FlaxSchedulerOutput):
- state: DDIMSchedulerState
-
-
-class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin):
- """
- Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising
- diffusion probabilistic models (DDPMs) with non-Markovian guidance.
-
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
- [`~SchedulerMixin.from_pretrained`] functions.
-
- For more details, see the original paper: https://arxiv.org/abs/2010.02502
-
- Args:
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
- beta_start (`float`): the starting `beta` value of inference.
- beta_end (`float`): the final `beta` value.
- beta_schedule (`str`):
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
- trained_betas (`jnp.ndarray`, optional):
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
- clip_sample (`bool`, default `True`):
- option to clip predicted sample between -1 and 1 for numerical stability.
- set_alpha_to_one (`bool`, default `True`):
- each diffusion step uses the value of alphas product at that step and at the previous one. For the final
- step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
- otherwise it uses the value of alpha at step 0.
- steps_offset (`int`, default `0`):
- an offset added to the inference steps. You can use a combination of `offset=1` and
- `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
- stable diffusion.
- prediction_type (`str`, default `epsilon`):
- indicates whether the model predicts the noise (epsilon), the sample, or the velocity. One of `epsilon`,
- `sample`, or `v_prediction`.
-
- """
-
- _compatibles = _FLAX_COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
- _deprecated_kwargs = ["predict_epsilon"]
-
- @property
- def has_state(self):
- return True
-
- @register_to_config
- def __init__(
- self,
- num_train_timesteps: int = 1000,
- beta_start: float = 0.0001,
- beta_end: float = 0.02,
- beta_schedule: str = "linear",
- set_alpha_to_one: bool = True,
- steps_offset: int = 0,
- prediction_type: str = "epsilon",
- **kwargs,
- ):
- message = (
- "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
- " FlaxDDIMScheduler.from_pretrained(, prediction_type='epsilon')`."
- )
- predict_epsilon = deprecate("predict_epsilon", "0.11.0", message, take_from=kwargs)
- if predict_epsilon is not None:
- self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
-
- if beta_schedule == "linear":
- self.betas = jnp.linspace(beta_start, beta_end, num_train_timesteps, dtype=jnp.float32)
- elif beta_schedule == "scaled_linear":
- # this schedule is very specific to the latent diffusion model.
- self.betas = jnp.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=jnp.float32) ** 2
- elif beta_schedule == "squaredcos_cap_v2":
- # Glide cosine schedule
- self.betas = betas_for_alpha_bar(num_train_timesteps)
- else:
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
-
- self.alphas = 1.0 - self.betas
-
- # HACK for now - clean up later (PVP)
- self._alphas_cumprod = jnp.cumprod(self.alphas, axis=0)
-
- # At every step in ddim, we are looking into the previous alphas_cumprod
- # For the final step, there is no previous alphas_cumprod because we are already at 0
- # `set_alpha_to_one` decides whether we set this parameter simply to one or
- # whether we use the final alpha of the "non-previous" one.
- self.final_alpha_cumprod = jnp.array(1.0) if set_alpha_to_one else float(self._alphas_cumprod[0])
-
- # standard deviation of the initial noise distribution
- self.init_noise_sigma = 1.0
-
- def scale_model_input(
- self, state: DDIMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
- ) -> jnp.ndarray:
- """
- Args:
- state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance.
- sample (`jnp.ndarray`): input sample
- timestep (`int`, optional): current timestep
-
- Returns:
- `jnp.ndarray`: scaled input sample
- """
- return sample
-
- def create_state(self):
- return DDIMSchedulerState.create(
- num_train_timesteps=self.config.num_train_timesteps, alphas_cumprod=self._alphas_cumprod
- )
-
- def _get_variance(self, timestep, prev_timestep, alphas_cumprod):
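- # Eq. (16) of the DDIM paper: sigma_t^2 = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_{t-1}).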
- alpha_prod_t = alphas_cumprod[timestep]
- alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], self.final_alpha_cumprod)
- beta_prod_t = 1 - alpha_prod_t
- beta_prod_t_prev = 1 - alpha_prod_t_prev
-
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
-
- return variance
-
- def set_timesteps(
- self, state: DDIMSchedulerState, num_inference_steps: int, shape: Tuple = ()
- ) -> DDIMSchedulerState:
- """
- Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
-
- Args:
- state (`DDIMSchedulerState`):
- the `FlaxDDIMScheduler` state data class instance.
- num_inference_steps (`int`):
- the number of diffusion steps used when generating samples with a pre-trained model.
- """
- offset = self.config.steps_offset
-
- step_ratio = self.config.num_train_timesteps // num_inference_steps
- # creates integer timesteps by multiplying by ratio
- # casting to int to avoid issues when num_inference_step is power of 3
- timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
- timesteps = timesteps + offset
-
- return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps)
-
- def step(
- self,
- state: DDIMSchedulerState,
- model_output: jnp.ndarray,
- timestep: int,
- sample: jnp.ndarray,
- return_dict: bool = True,
- ) -> Union[FlaxDDIMSchedulerOutput, Tuple]:
- """
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
- process from the learned model outputs (most often the predicted noise).
-
- Args:
- state (`DDIMSchedulerState`): the `FlaxDDIMScheduler` state data class instance.
- model_output (`jnp.ndarray`): direct output from learned diffusion model.
- timestep (`int`): current discrete timestep in the diffusion chain.
- sample (`jnp.ndarray`):
- current instance of sample being created by diffusion process.
- return_dict (`bool`): option for returning tuple rather than FlaxDDIMSchedulerOutput class
-
- Returns:
- [`FlaxDDIMSchedulerOutput`] or `tuple`: [`FlaxDDIMSchedulerOutput`] if `return_dict` is True, otherwise a
- `tuple`. When returning a tuple, the first element is the sample tensor.
-
- """
- if state.num_inference_steps is None:
- raise ValueError(
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
- )
-
- # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
- # Ideally, read the DDIM paper in detail to understand the steps below
-
- # Notation (<variable name> -> <name in paper>
- # - pred_noise_t -> e_theta(x_t, t)
- # - pred_original_sample -> f_theta(x_t, t) or x_0
- # - std_dev_t -> sigma_t
- # - eta -> η
- # - pred_sample_direction -> "direction pointing to x_t"
- # - pred_prev_sample -> "x_t-1"
-
- # TODO(Patrick) - eta is always 0.0 for now, allow to be set in step function
- eta = 0.0
-
- # 1. get previous step value (=t-1)
- prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps
-
- alphas_cumprod = state.alphas_cumprod
-
- # 2. compute alphas, betas
- alpha_prod_t = alphas_cumprod[timestep]
- alpha_prod_t_prev = jnp.where(prev_timestep >= 0, alphas_cumprod[prev_timestep], self.final_alpha_cumprod)
-
- beta_prod_t = 1 - alpha_prod_t
-
- # 3. compute predicted original sample from predicted noise also called
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- if self.config.prediction_type == "epsilon":
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
- elif self.config.prediction_type == "sample":
- pred_original_sample = model_output
- elif self.config.prediction_type == "v_prediction":
- pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
- # predict V
- model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
- else:
- raise ValueError(
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
- " `v_prediction`"
- )
-
- # 4. compute variance: "sigma_t(η)" -> see formula (16)
- # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
- variance = self._get_variance(timestep, prev_timestep, alphas_cumprod)
- std_dev_t = eta * variance ** (0.5)
-
- # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output
-
- # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
-
- if not return_dict:
- return (prev_sample, state)
-
- return FlaxDDIMSchedulerOutput(prev_sample=prev_sample, state=state)
-
- def add_noise(
- self,
- original_samples: jnp.ndarray,
- noise: jnp.ndarray,
- timesteps: jnp.ndarray,
- ) -> jnp.ndarray:
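- # Forward diffusion q(x_t | x_0): x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.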
- sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
- sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
-
- sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
- sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
-
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
- return noisy_samples
-
- def __len__(self):
- return self.config.num_train_timesteps
diff --git a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py b/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py
deleted file mode 100644
index f0cf9779b270e1aead32845006f8b881fcba37ad..0000000000000000000000000000000000000000
--- a/spaces/YouLiXiya/Mobile-SAM/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from torch import Tensor, nn
-from torchvision.ops.boxes import nms
-from transformers import BertConfig, BertModel, BertPreTrainedModel
-from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
-
-
-class BertModelWarper(nn.Module):
- def __init__(self, bert_model):
- super().__init__()
- # self.bert = bert_modelc
-
- self.config = bert_model.config
- self.embeddings = bert_model.embeddings
- self.encoder = bert_model.encoder
- self.pooler = bert_model.pooler
-
- self.get_extended_attention_mask = bert_model.get_extended_attention_mask
- self.invert_attention_mask = bert_model.invert_attention_mask
- self.get_head_mask = bert_model.get_head_mask
-
- def forward(
- self,
- input_ids=None,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- inputs_embeds=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_values=None,
- use_cache=None,
- output_attentions=None,
- output_hidden_states=None,
- return_dict=None,
- ):
- r"""
- encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
- the model is configured as a decoder.
- encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
- the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
-
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
- (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
- instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
- use_cache (:obj:`bool`, `optional`):
- If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
- decoding (see :obj:`past_key_values`).
- """
- output_attentions = (
- output_attentions if output_attentions is not None else self.config.output_attentions
- )
- output_hidden_states = (
- output_hidden_states
- if output_hidden_states is not None
- else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if self.config.is_decoder:
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- else:
- use_cache = False
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- input_shape = input_ids.size()
- batch_size, seq_length = input_shape
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- batch_size, seq_length = input_shape
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- # past_key_values_length
- past_key_values_length = (
- past_key_values[0][0].shape[2] if past_key_values is not None else 0
- )
-
- if attention_mask is None:
- attention_mask = torch.ones(
- ((batch_size, seq_length + past_key_values_length)), device=device
- )
- if token_type_ids is None:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
-
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
- # ourselves in which case we just need to make it broadcastable to all heads.
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
- attention_mask, input_shape, device
- )
-
- # If a 2D or 3D attention mask is provided for the cross-attention
- # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
- if self.config.is_decoder and encoder_hidden_states is not None:
- encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
- encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
- if encoder_attention_mask is None:
- encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
- encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
- else:
- encoder_extended_attention_mask = None
- # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
- # import ipdb; ipdb.set_trace()
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape bsz x n_heads x N x N
- # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
- # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
- head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
-
- embedding_output = self.embeddings(
- input_ids=input_ids,
- position_ids=position_ids,
- token_type_ids=token_type_ids,
- inputs_embeds=inputs_embeds,
- past_key_values_length=past_key_values_length,
- )
-
- encoder_outputs = self.encoder(
- embedding_output,
- attention_mask=extended_attention_mask,
- head_mask=head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_extended_attention_mask,
- past_key_values=past_key_values,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- sequence_output = encoder_outputs[0]
- pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
-
- if not return_dict:
- return (sequence_output, pooled_output) + encoder_outputs[1:]
-
- return BaseModelOutputWithPoolingAndCrossAttentions(
- last_hidden_state=sequence_output,
- pooler_output=pooled_output,
- past_key_values=encoder_outputs.past_key_values,
- hidden_states=encoder_outputs.hidden_states,
- attentions=encoder_outputs.attentions,
- cross_attentions=encoder_outputs.cross_attentions,
- )
-
-
-class TextEncoderShell(nn.Module):
- def __init__(self, text_encoder):
- super().__init__()
- self.text_encoder = text_encoder
- self.config = self.text_encoder.config
-
- def forward(self, **kw):
- # feed into text encoder
- return self.text_encoder(**kw)
-
-
-def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):
- """Generate attention mask between each pair of special tokens
- Args:
- tokenized (dict): tokenizer output whose "input_ids" has shape [bs, num_token].
- special_tokens_list (list): ids of the special tokens that delimit phrases.
- Returns:
- (torch.Tensor, torch.Tensor): attention mask between special tokens and the per-phrase position ids.
- """
- input_ids = tokenized["input_ids"]
- bs, num_token = input_ids.shape
- # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
- special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
- for special_token in special_tokens_list:
- special_tokens_mask |= input_ids == special_token
-
- # idxs: each row is a list of indices of special tokens
- idxs = torch.nonzero(special_tokens_mask)
-
- # generate attention mask and positional ids
- attention_mask = (
- torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
- )
- position_ids = torch.zeros((bs, num_token), device=input_ids.device)
- previous_col = 0
- for i in range(idxs.shape[0]):
- row, col = idxs[i]
- if (col == 0) or (col == num_token - 1):
- attention_mask[row, col, col] = True
- position_ids[row, col] = 0
- else:
- attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
- position_ids[row, previous_col + 1 : col + 1] = torch.arange(
- 0, col - previous_col, device=input_ids.device
- )
-
- previous_col = col
-
- # # padding mask
- # padding_mask = tokenized['attention_mask']
- # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
-
- return attention_mask, position_ids.to(torch.long)
-
-
-def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):
- """Generate attention mask between each pair of special tokens
- Args:
- tokenized (dict): tokenizer output whose "input_ids" has shape [bs, num_token].
- special_tokens_list (list): ids of the special tokens that delimit phrases.
- Returns:
- (torch.Tensor, torch.Tensor, list): attention mask between special tokens, position ids, and per-sample category-to-token masks.
- """
- input_ids = tokenized["input_ids"]
- bs, num_token = input_ids.shape
- # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
- special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
- for special_token in special_tokens_list:
- special_tokens_mask |= input_ids == special_token
-
- # idxs: each row is a list of indices of special tokens
- idxs = torch.nonzero(special_tokens_mask)
-
- # generate attention mask and positional ids
- attention_mask = (
- torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
- )
- position_ids = torch.zeros((bs, num_token), device=input_ids.device)
- cate_to_token_mask_list = [[] for _ in range(bs)]
- previous_col = 0
- for i in range(idxs.shape[0]):
- row, col = idxs[i]
- if (col == 0) or (col == num_token - 1):
- attention_mask[row, col, col] = True
- position_ids[row, col] = 0
- else:
- attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
- position_ids[row, previous_col + 1 : col + 1] = torch.arange(
- 0, col - previous_col, device=input_ids.device
- )
- c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
- c2t_maski[previous_col + 1 : col] = True
- cate_to_token_mask_list[row].append(c2t_maski)
- previous_col = col
-
- cate_to_token_mask_list = [
- torch.stack(cate_to_token_mask_listi, dim=0)
- for cate_to_token_mask_listi in cate_to_token_mask_list
- ]
-
- # # padding mask
- # padding_mask = tokenized['attention_mask']
- # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
-
- return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list
diff --git a/spaces/Yuliang/ECON/lib/pymafx/utils/pose_utils.py b/spaces/Yuliang/ECON/lib/pymafx/utils/pose_utils.py
deleted file mode 100644
index 966eb51850b6fa793b1e11d4dcad1eed2c6698da..0000000000000000000000000000000000000000
--- a/spaces/Yuliang/ECON/lib/pymafx/utils/pose_utils.py
+++ /dev/null
@@ -1,151 +0,0 @@
-"""
-Parts of the code are adapted from https://github.com/akanazawa/hmr
-"""
-from __future__ import absolute_import, division, print_function
-
-import numpy as np
-import torch
-
-
-def compute_similarity_transform(S1, S2):
- """
- Computes a similarity transform (sR, t) that maps
- a set of 3D points S1 (3 x N) as closely as possible onto a set of 3D points S2,
- where R is a 3x3 rotation matrix, t a 3x1 translation and s a scale factor,
- i.e. it solves the orthogonal Procrustes problem.
- """
- transposed = False
- if S1.shape[0] != 3 and S1.shape[0] != 2:
- S1 = S1.T
- S2 = S2.T
- transposed = True
- assert (S2.shape[1] == S1.shape[1])
-
- # 1. Remove mean.
- mu1 = S1.mean(axis=1, keepdims=True)
- mu2 = S2.mean(axis=1, keepdims=True)
- X1 = S1 - mu1
- X2 = S2 - mu2
-
- # 2. Compute variance of X1 used for scale.
- var1 = np.sum(X1**2)
-
- # 3. The outer product of X1 and X2.
- K = X1.dot(X2.T)
-
- # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are
- # singular vectors of K.
- U, s, Vh = np.linalg.svd(K)
- V = Vh.T
- # Construct Z that fixes the orientation of R to get det(R)=1.
- Z = np.eye(U.shape[0])
- Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
- # Construct R.
- R = V.dot(Z.dot(U.T))
-
- # 5. Recover scale.
- scale = np.trace(R.dot(K)) / var1
-
- # 6. Recover translation.
- t = mu2 - scale * (R.dot(mu1))
-
- # 7. Error:
- S1_hat = scale * R.dot(S1) + t
-
- if transposed:
- S1_hat = S1_hat.T
-
- return S1_hat
-
-
-def compute_similarity_transform_batch(S1, S2):
- """Batched version of compute_similarity_transform."""
- S1_hat = np.zeros_like(S1)
- for i in range(S1.shape[0]):
- S1_hat[i] = compute_similarity_transform(S1[i], S2[i])
- return S1_hat
-
-
-def reconstruction_error(S1, S2, reduction='mean'):
- """Do Procrustes alignment and compute reconstruction error."""
- S1_hat = compute_similarity_transform_batch(S1, S2)
- re = np.sqrt(((S1_hat - S2)**2).sum(axis=-1)).mean(axis=-1)
- if reduction == 'mean':
- re = re.mean()
- elif reduction == 'sum':
- re = re.sum()
- return re, S1_hat
-
-
-# https://math.stackexchange.com/questions/382760/composition-of-two-axis-angle-rotations
-def axis_angle_add(theta, roll_axis, alpha):
- """Composition of two axis-angle rotations (PyTorch version)
- Args:
- theta: N x 3
- roll_axis: N x 3
- alpha: N x 1
- Returns:
- equivalent axis-angle of the composition
- """
- alpha = alpha / 2.
-
- l2norm = torch.norm(theta + 1e-8, p=2, dim=1)
- angle = torch.unsqueeze(l2norm, -1)
-
- normalized = torch.div(theta, angle)
- angle = angle * 0.5
- b_cos = torch.cos(angle).cpu()
- b_sin = torch.sin(angle).cpu()
-
- a_cos = torch.cos(alpha)
- a_sin = torch.sin(alpha)
-
- dot_mm = torch.sum(normalized * roll_axis, dim=1, keepdim=True)
- cross_mm = torch.zeros_like(normalized)
- cross_mm[:, 0] = roll_axis[:, 1] * normalized[:, 2] - roll_axis[:, 2] * normalized[:, 1]
- cross_mm[:, 1] = roll_axis[:, 2] * normalized[:, 0] - roll_axis[:, 0] * normalized[:, 2]
- cross_mm[:, 2] = roll_axis[:, 0] * normalized[:, 1] - roll_axis[:, 1] * normalized[:, 0]
-
- c_cos = a_cos * b_cos - a_sin * b_sin * dot_mm
- c_sin_n = a_sin * b_cos * roll_axis + a_cos * b_sin * normalized + a_sin * b_sin * cross_mm
-
- c_angle = 2 * torch.acos(c_cos)
- c_sin = torch.sin(c_angle * 0.5)
- c_n = (c_angle / c_sin) * c_sin_n
-
- return c_n
-
-
-def axis_angle_add_np(theta, roll_axis, alpha):
- """Composition of two axis-angle rotations (NumPy version)
- Args:
- theta: N x 3
- roll_axis: N x 3
- alpha: N x 1
- Returns:
- equivalent axis-angle of the composition
- """
- alpha = alpha / 2.
-
- angle = np.linalg.norm(theta + 1e-8, ord=2, axis=1, keepdims=True)
- normalized = np.divide(theta, angle)
- angle = angle * 0.5
-
- b_cos = np.cos(angle)
- b_sin = np.sin(angle)
- a_cos = np.cos(alpha)
- a_sin = np.sin(alpha)
-
- dot_mm = np.sum(normalized * roll_axis, axis=1, keepdims=True)
- cross_mm = np.zeros_like(normalized)
- cross_mm[:, 0] = roll_axis[:, 1] * normalized[:, 2] - roll_axis[:, 2] * normalized[:, 1]
- cross_mm[:, 1] = roll_axis[:, 2] * normalized[:, 0] - roll_axis[:, 0] * normalized[:, 2]
- cross_mm[:, 2] = roll_axis[:, 0] * normalized[:, 1] - roll_axis[:, 1] * normalized[:, 0]
-
- c_cos = a_cos * b_cos - a_sin * b_sin * dot_mm
- c_sin_n = a_sin * b_cos * roll_axis + a_cos * b_sin * normalized + a_sin * b_sin * cross_mm
- c_angle = 2 * np.arccos(c_cos)
- c_sin = np.sin(c_angle * 0.5)
- c_n = (c_angle / c_sin) * c_sin_n
-
- return c_n
diff --git a/spaces/aakashgoel12/nlp1/code/qa.py b/spaces/aakashgoel12/nlp1/code/qa.py
deleted file mode 100644
index 7b391170d5284c4c5c2e23afba09552ad7c339de..0000000000000000000000000000000000000000
--- a/spaces/aakashgoel12/nlp1/code/qa.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import streamlit as st
-from transformers import pipeline
-import pandas as pd
-import datetime
-import base64
-import os
-
-@st.cache(allow_output_mutation = True)
-def load_model():
- question_answerer = pipeline('question-answering')
- return question_answerer
-
-# Function to convert any dataframe into a downloadable CSV file
-# @st.cache
-output_path = "results/df_log_file.csv"
-def csv_downloader(df):
- # IMPORTANT: Cache the conversion to prevent computation on every rerun
- res = df.to_csv(index=False,sep="\t").encode('utf-8')
- st.download_button(
- label="Download logs data as CSV separated by tab",
- data=res,
- file_name='df_log_file.csv',
- mime='text/csv')
-
-def load_file():
- """Load text from file"""
- uploaded_file = st.file_uploader("Upload Files",type=['txt'])
-
- if uploaded_file is not None:
- if uploaded_file.type == "text/plain":
- raw_text = str(uploaded_file.read(),"utf-8")
- return raw_text
-
-
-st.markdown('')
-
-# Loading Model
-question_answerer =load_model()
-
-# App title and description
-st.title("Answering questions from text")
-st.write("Upload text, pose questions, get answers")
-
-# Load file
-st.text("Disclaimer: This app stores user's input for model improvement purposes !!")
-raw_text = load_file()
-if raw_text != None and raw_text != '':
-
- # Display text
- with st.expander("See text"):
- st.write(raw_text)
-
- answer = ''
- question = st.text_input('Ask a question')
-
- if question != '' and raw_text != '':
- answer = question_answerer({
- 'question': question,
- 'context': raw_text
- })
-
- st.write("Answer: {}".format(answer["answer"]))
- st.write("Confidence score: {}".format(round(answer["score"],2)))
- res_df = pd.DataFrame({"TimeStamp":[str(datetime.datetime.now())],\
- "Question":[question],\
- "Input":[str(raw_text)],\
- "Answer":[str(answer["answer"])],\
- "Score":[str(round(answer["score"],2))]})
- res_df.to_csv(output_path, mode='a', index=False, sep="\t", header= not os.path.exists(output_path))
- st.dataframe(pd.read_csv(output_path,sep="\t").tail(4))
- csv_downloader(pd.read_csv(output_path,sep="\t"))
-
-
-# def csv_downloader(data):
-# csvfile = data.to_csv()
-# b64 = base64.b64encode(csvfile.encode()).decode()
-# new_filename = "results/df_log_file.csv"
-# st.markdown("#### Download File ###")
-# href = f'<a href="data:file/csv;base64,{b64}" download="{new_filename}">Click Here!!</a>'
-# st.markdown(href,unsafe_allow_html=True)
-
-# log_file_object = open('../logs/log_file.tsv','a')
-# log_file_object.write(str(datetime.datetime.now())+'\t'+str(raw_text)+'\t'+str(answer["answer"])+'\t'+str(answer["score"]))
-# log_file_object.write('\n')
-# log_file_object.close()
-
-
-# @st.cache(allow_output_mutation=True)
-# def Pageviews():
-# return []
-
-# pageviews=Pageviews()
-# pageviews.append('dummy')
-# try:
-# st.markdown('Page viewed = {} times.'.format(len(pageviews)))
-# except ValueError:
-# st.markdown('Page viewed = {} times.'.format(1))
diff --git a/spaces/abby-mcdonald/CardioPro/README.md b/spaces/abby-mcdonald/CardioPro/README.md
deleted file mode 100644
index 189938e523b2cb266ce0b11e7316e3d1c3545029..0000000000000000000000000000000000000000
--- a/spaces/abby-mcdonald/CardioPro/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: CardioPro
-emoji: 🔥
-colorFrom: yellow
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/datasets/stare.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/datasets/stare.py
deleted file mode 100644
index 3f71b25488cc11a6b4d582ac52b5a24e1ad1cf8e..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/datasets/stare.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# dataset settings
-dataset_type = 'STAREDataset'
-data_root = 'data/STARE'
-img_norm_cfg = dict(
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-img_scale = (605, 700)
-crop_size = (128, 128)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations'),
- dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
- dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
- dict(type='RandomFlip', prob=0.5),
- dict(type='PhotoMetricDistortion'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_semantic_seg'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=img_scale,
- # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-
-data = dict(
- samples_per_gpu=4,
- workers_per_gpu=4,
- train=dict(
- type='RepeatDataset',
- times=40000,
- dataset=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/training',
- ann_dir='annotations/training',
- pipeline=train_pipeline)),
- val=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline),
- test=dict(
- type=dataset_type,
- data_root=data_root,
- img_dir='images/validation',
- ann_dir='annotations/validation',
- pipeline=test_pipeline))
diff --git a/spaces/akhaliq/GPEN/face_model/op/__init__.py b/spaces/akhaliq/GPEN/face_model/op/__init__.py
deleted file mode 100644
index d0918d92285955855be89f00096b888ee5597ce3..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/GPEN/face_model/op/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .fused_act import FusedLeakyReLU, fused_leaky_relu
-from .upfirdn2d import upfirdn2d
diff --git a/spaces/akhaliq/Inkpunk-Diffusion/app.py b/spaces/akhaliq/Inkpunk-Diffusion/app.py
deleted file mode 100644
index ab75e3ed9e40377da8d83359383fea9d6bdd4e1f..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Inkpunk-Diffusion/app.py
+++ /dev/null
@@ -1,155 +0,0 @@
-from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
-import gradio as gr
-import torch
-from PIL import Image
-
-model_id = 'Envvi/Inkpunk-Diffusion'
-prefix = ''
-
-scheduler = DPMSolverMultistepScheduler(
- beta_start=0.00085,
- beta_end=0.012,
- beta_schedule="scaled_linear",
- num_train_timesteps=1000,
- trained_betas=None,
- predict_epsilon=True,
- thresholding=False,
- algorithm_type="dpmsolver++",
- solver_type="midpoint",
- lower_order_final=True,
-)
-
-pipe = StableDiffusionPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-if torch.cuda.is_available():
- pipe = pipe.to("cuda")
- pipe_i2i = pipe_i2i.to("cuda")
-
-def error_str(error, title="Error"):
- return f"""#### {title}
- {error}""" if error else ""
-
-def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=True):
-
- generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
- prompt = f"{prefix} {prompt}" if auto_prefix else prompt
-
- try:
- if img is not None:
- return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
- else:
- return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
- except Exception as e:
- return None, error_str(e)
-
-def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
-
- result = pipe(
- prompt,
- negative_prompt = neg_prompt,
- num_inference_steps = int(steps),
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return replace_nsfw_images(result)
-
-def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
-
- ratio = min(height / img.height, width / img.width)
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
- result = pipe_i2i(
- prompt,
- negative_prompt = neg_prompt,
- init_image = img,
- num_inference_steps = int(steps),
- strength = strength,
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return replace_nsfw_images(result)
-
-def replace_nsfw_images(results):
-
- for i in range(len(results.images)):
- if results.nsfw_content_detected[i]:
- results.images[i] = Image.open("nsfw.png")
- return results.images[0]
-
-css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
-"""
-with gr.Blocks(css=css) as demo:
- gr.HTML(
- f"""
-
-
-
-              Inkpunk Diffusion
-
-
- Demo for Inkpunk Diffusion Stable Diffusion model.
- Add the following tokens to your prompts for the model to work properly: .
-
-
-              This demo is currently on cpu, to use it upgrade to gpu by going to settings after duplicating this space:
- Running on {"GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"}
-
- """)
-
-demo.queue(concurrency_count=1)
-demo.launch()
diff --git a/spaces/akhaliq/yolov7/utils/google_app_engine/Dockerfile b/spaces/akhaliq/yolov7/utils/google_app_engine/Dockerfile
deleted file mode 100644
index 0155618f475104e9858b81470339558156c94e13..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/yolov7/utils/google_app_engine/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM gcr.io/google-appengine/python
-
-# Create a virtualenv for dependencies. This isolates these packages from
-# system-level packages.
-# Use -p python3 or -p python3.7 to select python version. Default is version 2.
-RUN virtualenv /env -p python3
-
-# Setting these environment variables is the same as running
-# source /env/bin/activate.
-ENV VIRTUAL_ENV /env
-ENV PATH /env/bin:$PATH
-
-RUN apt-get update && apt-get install -y python-opencv
-
-# Copy the application's requirements.txt and run pip to install all
-# dependencies into the virtualenv.
-ADD requirements.txt /app/requirements.txt
-RUN pip install -r /app/requirements.txt
-
-# Add the application source code.
-ADD . /app
-
-# Run a WSGI server to serve the application. gunicorn must be declared as
-# a dependency in requirements.txt.
-CMD gunicorn -b :$PORT main:app
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py
deleted file mode 100644
index 8663097b447cdd80c52e2b2abde33a4736ddb9c2..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""Utilities to lazily create and visit candidates found.
-
-Creating and visiting a candidate is a *very* costly operation. It involves
-fetching, extracting, potentially building modules from source, and verifying
-distribution metadata. It is therefore crucial for performance to keep
-everything here lazy all the way down, so we only touch candidates that we
-absolutely need, and not "download the world" when we only need one version of
-something.
-"""
-
-import functools
-from collections.abc import Sequence
-from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Set, Tuple
-
-from pip._vendor.packaging.version import _BaseVersion
-
-from .base import Candidate
-
-IndexCandidateInfo = Tuple[_BaseVersion, Callable[[], Optional[Candidate]]]
-
-if TYPE_CHECKING:
- SequenceCandidate = Sequence[Candidate]
-else:
- # For compatibility: Python before 3.9 does not support using [] on the
- # Sequence class.
- #
- # >>> from collections.abc import Sequence
- # >>> Sequence[str]
- # Traceback (most recent call last):
- # File "", line 1, in
- # TypeError: 'ABCMeta' object is not subscriptable
- #
- # TODO: Remove this block after dropping Python 3.8 support.
- SequenceCandidate = Sequence
-
-
-def _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]:
- """Iterator for ``FoundCandidates``.
-
- This iterator is used when the package is not already installed. Candidates
- from index come later in their normal ordering.
- """
- versions_found: Set[_BaseVersion] = set()
- for version, func in infos:
- if version in versions_found:
- continue
- candidate = func()
- if candidate is None:
- continue
- yield candidate
- versions_found.add(version)
-
-
-def _iter_built_with_prepended(
- installed: Candidate, infos: Iterator[IndexCandidateInfo]
-) -> Iterator[Candidate]:
- """Iterator for ``FoundCandidates``.
-
- This iterator is used when the resolver prefers the already-installed
- candidate and NOT to upgrade. The installed candidate is therefore
- always yielded first, and candidates from index come later in their
- normal ordering, except skipped when the version is already installed.
- """
- yield installed
- versions_found: Set[_BaseVersion] = {installed.version}
- for version, func in infos:
- if version in versions_found:
- continue
- candidate = func()
- if candidate is None:
- continue
- yield candidate
- versions_found.add(version)
-
-
-def _iter_built_with_inserted(
- installed: Candidate, infos: Iterator[IndexCandidateInfo]
-) -> Iterator[Candidate]:
- """Iterator for ``FoundCandidates``.
-
- This iterator is used when the resolver prefers to upgrade an
- already-installed package. Candidates from index are returned in their
- normal ordering, except replaced when the version is already installed.
-
- The implementation iterates through and yields other candidates, inserting
- the installed candidate exactly once before we start yielding older or
- equivalent candidates, or after all other candidates if they are all newer.
- """
- versions_found: Set[_BaseVersion] = set()
- for version, func in infos:
- if version in versions_found:
- continue
- # If the installed candidate is better, yield it first.
- if installed.version >= version:
- yield installed
- versions_found.add(installed.version)
- candidate = func()
- if candidate is None:
- continue
- yield candidate
- versions_found.add(version)
-
- # If the installed candidate is older than all other candidates.
- if installed.version not in versions_found:
- yield installed
-
-
-class FoundCandidates(SequenceCandidate):
- """A lazy sequence to provide candidates to the resolver.
-
- The intended usage is to return this from `find_matches()` so the resolver
- can iterate through the sequence multiple times, but only access the index
-    page when remote packages are actually needed. This improves performance
- when suitable candidates are already installed on disk.
- """
-
- def __init__(
- self,
- get_infos: Callable[[], Iterator[IndexCandidateInfo]],
- installed: Optional[Candidate],
- prefers_installed: bool,
- incompatible_ids: Set[int],
- ):
- self._get_infos = get_infos
- self._installed = installed
- self._prefers_installed = prefers_installed
- self._incompatible_ids = incompatible_ids
-
- def __getitem__(self, index: Any) -> Any:
- # Implemented to satisfy the ABC check. This is not needed by the
- # resolver, and should not be used by the provider either (for
- # performance reasons).
- raise NotImplementedError("don't do this")
-
- def __iter__(self) -> Iterator[Candidate]:
- infos = self._get_infos()
- if not self._installed:
- iterator = _iter_built(infos)
- elif self._prefers_installed:
- iterator = _iter_built_with_prepended(self._installed, infos)
- else:
- iterator = _iter_built_with_inserted(self._installed, infos)
- return (c for c in iterator if id(c) not in self._incompatible_ids)
-
- def __len__(self) -> int:
- # Implemented to satisfy the ABC check. This is not needed by the
- # resolver, and should not be used by the provider either (for
- # performance reasons).
- raise NotImplementedError("don't do this")
-
- @functools.lru_cache(maxsize=1)
- def __bool__(self) -> bool:
- if self._prefers_installed and self._installed:
- return True
- return any(self)
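
The docstrings in the deleted `found_candidates.py` above describe three lazy iteration strategies (index only, installed candidate prepended, installed candidate inserted by version), all deduplicated by version. A minimal standalone sketch of the prepended case; `FakeCandidate` and integer versions are illustrative assumptions, not pip's real `Candidate` or `_BaseVersion` types:

```python
from typing import Callable, Iterator, NamedTuple, Optional, Set, Tuple

class FakeCandidate(NamedTuple):
    name: str
    version: int

ToyInfo = Tuple[int, Callable[[], Optional[FakeCandidate]]]

def iter_with_prepended(installed: FakeCandidate, infos: Iterator[ToyInfo]) -> Iterator[FakeCandidate]:
    # Same shape as _iter_built_with_prepended: yield the installed candidate first,
    # then lazily build index candidates, skipping versions already yielded.
    yield installed
    seen: Set[int] = {installed.version}
    for version, build in infos:
        if version in seen:
            continue
        candidate = build()  # the "costly" step stays lazy
        if candidate is None:
            continue
        yield candidate
        seen.add(version)

infos = iter([
    (3, lambda: FakeCandidate("pkg", 3)),
    (2, lambda: FakeCandidate("pkg", 2)),  # same version as installed: skipped, never built
    (1, lambda: FakeCandidate("pkg", 1)),
])
print(list(iter_with_prepended(FakeCandidate("pkg", 2), infos)))
# [FakeCandidate(name='pkg', version=2), FakeCandidate(name='pkg', version=3), FakeCandidate(name='pkg', version=1)]
```
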
diff --git a/spaces/algomuffin/jojo_fork/e4e/utils/data_utils.py b/spaces/algomuffin/jojo_fork/e4e/utils/data_utils.py
deleted file mode 100644
index f1ba79f4a2d5cc2b97dce76d87bf6e7cdebbc257..0000000000000000000000000000000000000000
--- a/spaces/algomuffin/jojo_fork/e4e/utils/data_utils.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-Code adopted from pix2pixHD:
-https://github.com/NVIDIA/pix2pixHD/blob/master/data/image_folder.py
-"""
-import os
-
-IMG_EXTENSIONS = [
- '.jpg', '.JPG', '.jpeg', '.JPEG',
- '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
-]
-
-
-def is_image_file(filename):
- return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
-
-
-def make_dataset(dir):
- images = []
- assert os.path.isdir(dir), '%s is not a valid directory' % dir
- for root, _, fnames in sorted(os.walk(dir)):
- for fname in fnames:
- if is_image_file(fname):
- path = os.path.join(root, fname)
- images.append(path)
- return images
diff --git a/spaces/allknowingroger/Image-Models-Test159/app.py b/spaces/allknowingroger/Image-Models-Test159/app.py
deleted file mode 100644
index cf0d7c5a3940355f892d09907a0ef0bc1cbc3392..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test159/app.py
+++ /dev/null
@@ -1,144 +0,0 @@
-import gradio as gr
-# import os
-# import sys
-# from pathlib import Path
-import time
-
-models =[
- "LinoyTsaban/lora-xl-linoy_face-0.0001-5e-05-1000-1-32",
- "BHAVITHAREDDY/young-ladies-walking-on-streets-of-mystic-falls-xzg",
- "hosnasn/Reza_DB200",
- "dbecker1/test_lora_mdl4",
- "hosnasn/Reza_DB",
- "Gurusha/dreambooth_peace_sign",
- "Gurusha/dreambooth_holding_umbrella",
- "lccllccc/0920_sdxl_lora_2500_steps",
- "LinoyTsaban/lora-xl-linoy_face-0.0001-0.0001-1000-1-32",
-]
-
-
-model_functions = {}
-model_idx = 1
-for model_path in models:
- try:
- model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False)
- except Exception as error:
- def the_fn(txt):
- return None
- model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"])
- model_idx+=1
-
-
-def send_it_idx(idx):
- def send_it_fn(prompt):
-        output = (model_functions.get(idx) or model_functions.get(1))(prompt)  # model_functions is keyed by int
- return output
- return send_it_fn
-
-def get_prompts(prompt_text):
- return prompt_text
-
-def clear_it(val):
- if int(val) != 0:
- val = 0
- else:
- val = 0
- pass
- return val
-
-def all_task_end(cnt,t_stamp):
- to = t_stamp + 60
- et = time.time()
- if et > to and t_stamp != 0:
- d = gr.update(value=0)
- tog = gr.update(value=1)
- #print(f'to: {to} et: {et}')
- else:
- if cnt != 0:
- d = gr.update(value=et)
- else:
- d = gr.update(value=0)
- tog = gr.update(value=0)
- #print (f'passing: to: {to} et: {et}')
- pass
- return d, tog
-
-def all_task_start():
- print("\n\n\n\n\n\n\n")
- t = time.gmtime()
- t_stamp = time.time()
- current_time = time.strftime("%H:%M:%S", t)
- return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0)
-
-def clear_fn():
- nn = len(models)
- return tuple([None, *[None for _ in range(nn)]])
-
-
-
-with gr.Blocks(title="SD Models") as my_interface:
- with gr.Column(scale=12):
- # with gr.Row():
-        #     gr.Markdown("""- Primary prompt: what you want to draw (English words, e.g. a cat; adding English commas helps; click the Improve button to refine it)\n- Real prompt: the refined prompt; once it appears, click the Run button on the right to start generation""")
- with gr.Row():
- with gr.Row(scale=6):
- primary_prompt=gr.Textbox(label="Prompt", value="")
- # real_prompt=gr.Textbox(label="Real prompt")
- with gr.Row(scale=6):
- # improve_prompts_btn=gr.Button("Improve")
- with gr.Row():
- run=gr.Button("Run",variant="primary")
- clear_btn=gr.Button("Clear")
- with gr.Row():
- sd_outputs = {}
- model_idx = 1
- for model_path in models:
- with gr.Column(scale=3, min_width=320):
- with gr.Box():
- sd_outputs[model_idx] = gr.Image(label=model_path)
- pass
- model_idx += 1
- pass
- pass
-
- with gr.Row(visible=False):
- start_box=gr.Number(interactive=False)
- end_box=gr.Number(interactive=False)
- tog_box=gr.Textbox(value=0,interactive=False)
-
- start_box.change(
- all_task_end,
- [start_box, end_box],
- [start_box, tog_box],
- every=1,
- show_progress=False)
-
- primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box])
- run.click(all_task_start, None, [start_box, end_box, tog_box])
- runs_dict = {}
- model_idx = 1
- for model_path in models:
- runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]])
- model_idx += 1
- pass
- pass
-
- # improve_prompts_btn_clicked=improve_prompts_btn.click(
- # get_prompts,
- # inputs=[primary_prompt],
- # outputs=[primary_prompt],
- # cancels=list(runs_dict.values()))
- clear_btn.click(
- clear_fn,
- None,
- [primary_prompt, *list(sd_outputs.values())],
- cancels=[*list(runs_dict.values())])
- tog_box.change(
- clear_it,
- tog_box,
- tog_box,
- cancels=[*list(runs_dict.values())])
-
-my_interface.queue(concurrency_count=600, status_update_rate=1)
-my_interface.launch(inline=True, show_api=False)
-
\ No newline at end of file
diff --git a/spaces/allknowingroger/Image-Models-Test40/app.py b/spaces/allknowingroger/Image-Models-Test40/app.py
deleted file mode 100644
index 5b47c317c945fff8d73e6e360fdff0a832a89ab9..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test40/app.py
+++ /dev/null
@@ -1,144 +0,0 @@
-import gradio as gr
-# import os
-# import sys
-# from pathlib import Path
-import time
-
-models =[
- "Revanthraja/Fashion",
- "michaelee0407/path-to-save-model",
- "digiplay/PhotoSomnia_vFinal",
- "digiplay/2K",
- "bharadwajkg/finetune-stable-diffusion-v1-4-planogram-lora-data3",
- "SojiLee/modelka-icons-style",
- "Yntec/GOLDFish",
- "samarthum/model",
- "navyatiwari11/my-pet-cat-nxt",
-]
-
-
-model_functions = {}
-model_idx = 1
-for model_path in models:
- try:
- model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False)
- except Exception as error:
- def the_fn(txt):
- return None
- model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"])
- model_idx+=1
-
-
-def send_it_idx(idx):
- def send_it_fn(prompt):
-        output = (model_functions.get(idx) or model_functions.get(1))(prompt)  # model_functions is keyed by int
- return output
- return send_it_fn
-
-def get_prompts(prompt_text):
- return prompt_text
-
-def clear_it(val):
- if int(val) != 0:
- val = 0
- else:
- val = 0
- pass
- return val
-
-def all_task_end(cnt,t_stamp):
- to = t_stamp + 60
- et = time.time()
- if et > to and t_stamp != 0:
- d = gr.update(value=0)
- tog = gr.update(value=1)
- #print(f'to: {to} et: {et}')
- else:
- if cnt != 0:
- d = gr.update(value=et)
- else:
- d = gr.update(value=0)
- tog = gr.update(value=0)
- #print (f'passing: to: {to} et: {et}')
- pass
- return d, tog
-
-def all_task_start():
- print("\n\n\n\n\n\n\n")
- t = time.gmtime()
- t_stamp = time.time()
- current_time = time.strftime("%H:%M:%S", t)
- return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0)
-
-def clear_fn():
- nn = len(models)
- return tuple([None, *[None for _ in range(nn)]])
-
-
-
-with gr.Blocks(title="SD Models") as my_interface:
- with gr.Column(scale=12):
- # with gr.Row():
-        #     gr.Markdown("""- Primary prompt: what you want to draw (English words, e.g. a cat; adding English commas helps; click the Improve button to refine it)\n- Real prompt: the refined prompt; once it appears, click the Run button on the right to start generation""")
- with gr.Row():
- with gr.Row(scale=6):
- primary_prompt=gr.Textbox(label="Prompt", value="")
- # real_prompt=gr.Textbox(label="Real prompt")
- with gr.Row(scale=6):
- # improve_prompts_btn=gr.Button("Improve")
- with gr.Row():
- run=gr.Button("Run",variant="primary")
- clear_btn=gr.Button("Clear")
- with gr.Row():
- sd_outputs = {}
- model_idx = 1
- for model_path in models:
- with gr.Column(scale=3, min_width=320):
- with gr.Box():
- sd_outputs[model_idx] = gr.Image(label=model_path)
- pass
- model_idx += 1
- pass
- pass
-
- with gr.Row(visible=False):
- start_box=gr.Number(interactive=False)
- end_box=gr.Number(interactive=False)
- tog_box=gr.Textbox(value=0,interactive=False)
-
- start_box.change(
- all_task_end,
- [start_box, end_box],
- [start_box, tog_box],
- every=1,
- show_progress=False)
-
- primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box])
- run.click(all_task_start, None, [start_box, end_box, tog_box])
- runs_dict = {}
- model_idx = 1
- for model_path in models:
- runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]])
- model_idx += 1
- pass
- pass
-
- # improve_prompts_btn_clicked=improve_prompts_btn.click(
- # get_prompts,
- # inputs=[primary_prompt],
- # outputs=[primary_prompt],
- # cancels=list(runs_dict.values()))
- clear_btn.click(
- clear_fn,
- None,
- [primary_prompt, *list(sd_outputs.values())],
- cancels=[*list(runs_dict.values())])
- tog_box.change(
- clear_it,
- tog_box,
- tog_box,
- cancels=[*list(runs_dict.values())])
-
-my_interface.queue(concurrency_count=600, status_update_rate=1)
-my_interface.launch(inline=True, show_api=False)
-
\ No newline at end of file
diff --git a/spaces/amitkayal/Article-Rewriter/README.md b/spaces/amitkayal/Article-Rewriter/README.md
deleted file mode 100644
index 939045d0372e7b9c325b936db5fd4fc45ff8ed50..0000000000000000000000000000000000000000
--- a/spaces/amitkayal/Article-Rewriter/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Article Rewriter
-emoji: 📉
-colorFrom: indigo
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.4.1
-app_file: app.py
-pinned: false
-duplicated_from: imseldrith/Article-Rewriter
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/antonovmaxim/text-generation-webui-space/extensions/openai/script.py b/spaces/antonovmaxim/text-generation-webui-space/extensions/openai/script.py
deleted file mode 100644
index 712cfe3887338bbb6c3d301817bd98c159bf63d5..0000000000000000000000000000000000000000
--- a/spaces/antonovmaxim/text-generation-webui-space/extensions/openai/script.py
+++ /dev/null
@@ -1,701 +0,0 @@
-import base64
-import json
-import os
-import time
-import requests
-import yaml
-from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
-from threading import Thread
-
-import numpy as np
-
-from modules import shared
-from modules.text_generation import encode, generate_reply
-
-params = {
- 'port': int(os.environ.get('OPENEDAI_PORT')) if 'OPENEDAI_PORT' in os.environ else 5001,
-}
-
-debug = True if 'OPENEDAI_DEBUG' in os.environ else False
-
-# Optional, install the module and download the model to enable
-# v1/embeddings
-try:
- from sentence_transformers import SentenceTransformer
-except ImportError:
- pass
-
-st_model = os.environ["OPENEDAI_EMBEDDING_MODEL"] if "OPENEDAI_EMBEDDING_MODEL" in os.environ else "all-mpnet-base-v2"
-embedding_model = None
-
-standard_stopping_strings = ['\nsystem:', '\nuser:', '\nhuman:', '\nassistant:', '\n###', ]
-
-# Small helper: fetch dic[key]; if its type differs from default's, try coercing it to default's type, otherwise fall back to default.
-def default(dic, key, default):
- val = dic.get(key, default)
- if type(val) != type(default):
- # maybe it's just something like 1 instead of 1.0
- try:
- v = type(default)(val)
- if type(val)(v) == val: # if it's the same value passed in, it's ok.
- return v
- except:
- pass
-
- val = default
- return val
-
-
-def clamp(value, minvalue, maxvalue):
- return max(minvalue, min(value, maxvalue))
-
-
-def deduce_template():
- # Alpaca is verbose so a good default prompt
- default_template = (
- "Below is an instruction that describes a task, paired with an input that provides further context. "
- "Write a response that appropriately completes the request.\n\n"
- "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
- )
-
- # Use the special instruction/input/response template for anything trained like Alpaca
- if shared.settings['instruction_template'] in ['Alpaca', 'Alpaca-Input']:
- return default_template
-
- try:
- instruct = yaml.safe_load(open(f"characters/instruction-following/{shared.settings['instruction_template']}.yaml", 'r'))
-
- template = instruct['turn_template']
- template = template\
- .replace('<|user|>', instruct.get('user', ''))\
- .replace('<|bot|>', instruct.get('bot', ''))\
- .replace('<|user-message|>', '{instruction}\n{input}')
- return instruct.get('context', '') + template[:template.find('<|bot-message|>')].rstrip(' ')
- except:
- return default_template
-
-
-def float_list_to_base64(float_list):
- # Convert the list to a float32 array that the OpenAPI client expects
- float_array = np.array(float_list, dtype="float32")
-
- # Get raw bytes
- bytes_array = float_array.tobytes()
-
- # Encode bytes into base64
- encoded_bytes = base64.b64encode(bytes_array)
-
- # Turn raw base64 encoded bytes into ASCII
- ascii_string = encoded_bytes.decode('ascii')
- return ascii_string
-
-
-class Handler(BaseHTTPRequestHandler):
- def do_GET(self):
- if self.path.startswith('/v1/models'):
-
- self.send_response(200)
- self.send_header('Content-Type', 'application/json')
- self.end_headers()
-
- # TODO: list all models and allow model changes via API? Lora's?
- # This API should list capabilities, limits and pricing...
- models = [{
- "id": shared.model_name, # The real chat/completions model
- "object": "model",
- "owned_by": "user",
- "permission": []
- }, {
- "id": st_model, # The real sentence transformer embeddings model
- "object": "model",
- "owned_by": "user",
- "permission": []
-            }, { # these model names are expected by so many clients, so include some dummy entries here
- "id": "gpt-3.5-turbo", # /v1/chat/completions
- "object": "model",
- "owned_by": "user",
- "permission": []
- }, {
- "id": "text-curie-001", # /v1/completions, 2k context
- "object": "model",
- "owned_by": "user",
- "permission": []
- }, {
- "id": "text-davinci-002", # /v1/embeddings text-embedding-ada-002:1536, text-davinci-002:768
- "object": "model",
- "owned_by": "user",
- "permission": []
- }]
-
- response = ''
- if self.path == '/v1/models':
- response = json.dumps({
- "object": "list",
- "data": models,
- })
- else:
- the_model_name = self.path[len('/v1/models/'):]
- response = json.dumps({
- "id": the_model_name,
- "object": "model",
- "owned_by": "user",
- "permission": []
- })
-
- self.wfile.write(response.encode('utf-8'))
- else:
- self.send_error(404)
-
- def do_POST(self):
- if debug:
- print(self.headers) # did you know... python-openai sends your linux kernel & python version?
- content_length = int(self.headers['Content-Length'])
- body = json.loads(self.rfile.read(content_length).decode('utf-8'))
-
- if debug:
- print(body)
-
- if '/completions' in self.path or '/generate' in self.path:
- is_legacy = '/generate' in self.path
- is_chat = 'chat' in self.path
- resp_list = 'data' if is_legacy else 'choices'
-
- # XXX model is ignored for now
- # model = body.get('model', shared.model_name) # ignored, use existing for now
- model = shared.model_name
- created_time = int(time.time())
- cmpl_id = "conv-%d" % (created_time)
-
- # Try to use openai defaults or map them to something with the same intent
- stopping_strings = default(shared.settings, 'custom_stopping_strings', [])
- if 'stop' in body:
- if isinstance(body['stop'], str):
- stopping_strings = [body['stop']]
- elif isinstance(body['stop'], list):
- stopping_strings = body['stop']
-
- truncation_length = default(shared.settings, 'truncation_length', 2048)
- truncation_length = clamp(default(body, 'truncation_length', truncation_length), 1, truncation_length)
-
- default_max_tokens = truncation_length if is_chat else 16 # completions default, chat default is 'inf' so we need to cap it.
-
- max_tokens_str = 'length' if is_legacy else 'max_tokens'
- max_tokens = default(body, max_tokens_str, default(shared.settings, 'max_new_tokens', default_max_tokens))
-
- # hard scale this, assuming the given max is for GPT3/4, perhaps inspect the requested model and lookup the context max
- while truncation_length <= max_tokens:
- max_tokens = max_tokens // 2
-
- req_params = {
- 'max_new_tokens': max_tokens,
- 'temperature': default(body, 'temperature', 1.0),
- 'top_p': default(body, 'top_p', 1.0),
- 'top_k': default(body, 'best_of', 1),
- # XXX not sure about this one, seems to be the right mapping, but the range is different (-2..2.0) vs 0..2
- # 0 is default in openai, but 1.0 is default in other places. Maybe it's scaled? scale it.
- 'repetition_penalty': 1.18, # (default(body, 'presence_penalty', 0) + 2.0 ) / 2.0, # 0 the real default, 1.2 is the model default, but 1.18 works better.
- # XXX not sure about this one either, same questions. (-2..2.0), 0 is default not 1.0, scale it.
- 'encoder_repetition_penalty': 1.0, # (default(body, 'frequency_penalty', 0) + 2.0) / 2.0,
- 'suffix': body.get('suffix', None),
- 'stream': default(body, 'stream', False),
- 'echo': default(body, 'echo', False),
- #####################################################
- 'seed': shared.settings.get('seed', -1),
- # int(body.get('n', 1)) # perhaps this should be num_beams or chat_generation_attempts? 'n' doesn't have a direct map
- # unofficial, but it needs to get set anyways.
- 'truncation_length': truncation_length,
- # no more args.
- 'add_bos_token': shared.settings.get('add_bos_token', True),
- 'do_sample': True,
- 'typical_p': 1.0,
- 'min_length': 0,
- 'no_repeat_ngram_size': 0,
- 'num_beams': 1,
- 'penalty_alpha': 0.0,
- 'length_penalty': 1,
- 'early_stopping': False,
- 'ban_eos_token': False,
- 'skip_special_tokens': True,
- }
-
- # fixup absolute 0.0's
- for par in ['temperature', 'repetition_penalty', 'encoder_repetition_penalty']:
- req_params[par] = clamp(req_params[par], 0.001, 1.999)
-
- self.send_response(200)
- if req_params['stream']:
- self.send_header('Content-Type', 'text/event-stream')
- self.send_header('Cache-Control', 'no-cache')
- # self.send_header('Connection', 'keep-alive')
- else:
- self.send_header('Content-Type', 'application/json')
- self.end_headers()
-
- token_count = 0
- completion_token_count = 0
- prompt = ''
- stream_object_type = ''
- object_type = ''
-
- if is_chat:
- stream_object_type = 'chat.completions.chunk'
- object_type = 'chat.completions'
-
- messages = body['messages']
-
- system_msg = '' # You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible. Knowledge cutoff: {knowledge_cutoff} Current date: {current_date}
- if 'prompt' in body: # Maybe they sent both? This is not documented in the API, but some clients seem to do this.
- system_msg = body['prompt']
-
- chat_msgs = []
-
- for m in messages:
- role = m['role']
- content = m['content']
- # name = m.get('name', 'user')
- if role == 'system':
- system_msg += content
- else:
- chat_msgs.extend([f"\n{role}: {content.strip()}"]) # Strip content? linefeed?
-
- system_token_count = len(encode(system_msg)[0])
- remaining_tokens = req_params['truncation_length'] - req_params['max_new_tokens'] - system_token_count
- chat_msg = ''
-
- while chat_msgs:
- new_msg = chat_msgs.pop()
- new_size = len(encode(new_msg)[0])
- if new_size <= remaining_tokens:
- chat_msg = new_msg + chat_msg
- remaining_tokens -= new_size
- else:
- # TODO: clip a message to fit?
- # ie. user: ...
- break
-
- if len(chat_msgs) > 0:
- print(f"truncating chat messages, dropping {len(chat_msgs)} messages.")
-
- if system_msg:
- prompt = 'system: ' + system_msg + '\n' + chat_msg + '\nassistant: '
- else:
- prompt = chat_msg + '\nassistant: '
-
- token_count = len(encode(prompt)[0])
-
- # pass with some expected stop strings.
- # some strange cases of "##| Instruction: " sneaking through.
- stopping_strings += standard_stopping_strings
- req_params['custom_stopping_strings'] = stopping_strings
- else:
- stream_object_type = 'text_completion.chunk'
- object_type = 'text_completion'
-
- # ... encoded as a string, array of strings, array of tokens, or array of token arrays.
- if is_legacy:
- prompt = body['context'] # Older engines.generate API
- else:
- prompt = body['prompt'] # XXX this can be different types
-
- if isinstance(prompt, list):
- prompt = ''.join(prompt) # XXX this is wrong... need to split out to multiple calls?
-
- token_count = len(encode(prompt)[0])
- if token_count >= req_params['truncation_length']:
- new_len = int(len(prompt) * (float(shared.settings['truncation_length']) - req_params['max_new_tokens']) / token_count)
- prompt = prompt[-new_len:]
- print(f"truncating prompt to {new_len} characters, was {token_count} tokens. Now: {len(encode(prompt)[0])} tokens.")
-
- # pass with some expected stop strings.
- # some strange cases of "##| Instruction: " sneaking through.
- stopping_strings += standard_stopping_strings
- req_params['custom_stopping_strings'] = stopping_strings
-
- if req_params['stream']:
- shared.args.chat = True
- # begin streaming
- chunk = {
- "id": cmpl_id,
- "object": stream_object_type,
- "created": created_time,
- "model": shared.model_name,
- resp_list: [{
- "index": 0,
- "finish_reason": None,
- }],
- }
-
- if stream_object_type == 'text_completion.chunk':
- chunk[resp_list][0]["text"] = ""
- else:
- # This is coming back as "system" to the openapi cli, not sure why.
- # So yeah... do both methods? delta and messages.
- chunk[resp_list][0]["message"] = {'role': 'assistant', 'content': ''}
- chunk[resp_list][0]["delta"] = {'role': 'assistant', 'content': ''}
- # { "role": "assistant" }
-
- response = 'data: ' + json.dumps(chunk) + '\n'
- self.wfile.write(response.encode('utf-8'))
-
- # generate reply #######################################
- if debug:
- print({'prompt': prompt, 'req_params': req_params, 'stopping_strings': stopping_strings})
- generator = generate_reply(prompt, req_params, stopping_strings=stopping_strings, is_chat=False)
-
- answer = ''
- seen_content = ''
- longest_stop_len = max([len(x) for x in stopping_strings])
-
- for a in generator:
- answer = a
-
- stop_string_found = False
- len_seen = len(seen_content)
- search_start = max(len_seen - longest_stop_len, 0)
-
- for string in stopping_strings:
- idx = answer.find(string, search_start)
- if idx != -1:
- answer = answer[:idx] # clip it.
- stop_string_found = True
-
- if stop_string_found:
- break
-
- # If something like "\nYo" is generated just before "\nYou:"
- # is completed, buffer and generate more, don't send it
- buffer_and_continue = False
-
- for string in stopping_strings:
- for j in range(len(string) - 1, 0, -1):
- if answer[-j:] == string[:j]:
- buffer_and_continue = True
- break
- else:
- continue
- break
-
- if buffer_and_continue:
- continue
-
- if req_params['stream']:
- # Streaming
- new_content = answer[len_seen:]
-
- if not new_content or chr(0xfffd) in new_content: # partial unicode character, don't send it yet.
- continue
-
- seen_content = answer
- chunk = {
- "id": cmpl_id,
- "object": stream_object_type,
- "created": created_time,
- "model": shared.model_name,
- resp_list: [{
- "index": 0,
- "finish_reason": None,
- }],
- }
- if stream_object_type == 'text_completion.chunk':
- chunk[resp_list][0]['text'] = new_content
- else:
- # So yeah... do both methods? delta and messages.
- chunk[resp_list][0]['message'] = {'content': new_content}
- chunk[resp_list][0]['delta'] = {'content': new_content}
- response = 'data: ' + json.dumps(chunk) + '\n'
- self.wfile.write(response.encode('utf-8'))
- completion_token_count += len(encode(new_content)[0])
-
- if req_params['stream']:
- chunk = {
- "id": cmpl_id,
- "object": stream_object_type,
- "created": created_time,
- "model": model, # TODO: add Lora info?
- resp_list: [{
- "index": 0,
- "finish_reason": "stop",
- }],
- "usage": {
- "prompt_tokens": token_count,
- "completion_tokens": completion_token_count,
- "total_tokens": token_count + completion_token_count
- }
- }
- if stream_object_type == 'text_completion.chunk':
- chunk[resp_list][0]['text'] = ''
- else:
- # So yeah... do both methods? delta and messages.
- chunk[resp_list][0]['message'] = {'content': ''}
- chunk[resp_list][0]['delta'] = {}
- response = 'data: ' + json.dumps(chunk) + '\ndata: [DONE]\n'
- self.wfile.write(response.encode('utf-8'))
- # Finished if streaming.
- if debug:
- print({'response': answer})
- return
-
- if debug:
- print({'response': answer})
-
- completion_token_count = len(encode(answer)[0])
- stop_reason = "stop"
- if token_count + completion_token_count >= req_params['truncation_length']:
- stop_reason = "length"
-
- resp = {
- "id": cmpl_id,
- "object": object_type,
- "created": created_time,
- "model": model, # TODO: add Lora info?
- resp_list: [{
- "index": 0,
- "finish_reason": stop_reason,
- }],
- "usage": {
- "prompt_tokens": token_count,
- "completion_tokens": completion_token_count,
- "total_tokens": token_count + completion_token_count
- }
- }
-
- if is_chat:
- resp[resp_list][0]["message"] = {"role": "assistant", "content": answer}
- else:
- resp[resp_list][0]["text"] = answer
-
- response = json.dumps(resp)
- self.wfile.write(response.encode('utf-8'))
- elif '/edits' in self.path:
- self.send_response(200)
- self.send_header('Content-Type', 'application/json')
- self.end_headers()
-
- created_time = int(time.time())
-
- # Using Alpaca format, this may work with other models too.
- instruction = body['instruction']
- input = body.get('input', '')
-
- instruction_template = deduce_template()
- edit_task = instruction_template.format(instruction=instruction, input=input)
-
- truncation_length = default(shared.settings, 'truncation_length', 2048)
- token_count = len(encode(edit_task)[0])
- max_tokens = truncation_length - token_count
-
- req_params = {
- 'max_new_tokens': max_tokens,
- 'temperature': clamp(default(body, 'temperature', 1.0), 0.001, 1.999),
- 'top_p': clamp(default(body, 'top_p', 1.0), 0.001, 1.0),
- 'top_k': 1,
- 'repetition_penalty': 1.18,
- 'encoder_repetition_penalty': 1.0,
- 'suffix': None,
- 'stream': False,
- 'echo': False,
- 'seed': shared.settings.get('seed', -1),
- # 'n' : default(body, 'n', 1), # 'n' doesn't have a direct map
- 'truncation_length': truncation_length,
- 'add_bos_token': shared.settings.get('add_bos_token', True),
- 'do_sample': True,
- 'typical_p': 1.0,
- 'min_length': 0,
- 'no_repeat_ngram_size': 0,
- 'num_beams': 1,
- 'penalty_alpha': 0.0,
- 'length_penalty': 1,
- 'early_stopping': False,
- 'ban_eos_token': False,
- 'skip_special_tokens': True,
- 'custom_stopping_strings': [],
- }
-
- if debug:
- print({'edit_template': edit_task, 'req_params': req_params, 'token_count': token_count})
-
- generator = generate_reply(edit_task, req_params, stopping_strings=standard_stopping_strings, is_chat=False)
-
- answer = ''
- for a in generator:
- answer = a
-
-            # some replies have an extra leading space to fit the instruction template; just clip it off from the reply.
- if edit_task[-1] != '\n' and answer and answer[0] == ' ':
- answer = answer[1:]
-
- completion_token_count = len(encode(answer)[0])
-
- resp = {
- "object": "edit",
- "created": created_time,
- "choices": [{
- "text": answer,
- "index": 0,
- }],
- "usage": {
- "prompt_tokens": token_count,
- "completion_tokens": completion_token_count,
- "total_tokens": token_count + completion_token_count
- }
- }
-
- if debug:
- print({'answer': answer, 'completion_token_count': completion_token_count})
-
- response = json.dumps(resp)
- self.wfile.write(response.encode('utf-8'))
- elif '/images/generations' in self.path and 'SD_WEBUI_URL' in os.environ:
- # Stable Diffusion callout wrapper for txt2img
- # Low effort implementation for compatibility. With only "prompt" being passed and assuming DALL-E
- # the results will be limited and likely poor. SD has hundreds of models and dozens of settings.
- # If you want high quality tailored results you should just use the Stable Diffusion API directly.
-            # It's too general an API to try to shape the result with specific tags like "masterpiece", etc.;
-            # it will probably work best with the stock SD models.
-            # SD configuration is beyond the scope of this API.
-            # At this point I will not add the edits and variations endpoints (i.e. img2img) because they
-            # require changing the form data handling to accept multipart form data; properly supporting
-            # url return types would also require file management and a web server to serve the files... Perhaps later!
-
- self.send_response(200)
- self.send_header('Content-Type', 'application/json')
- self.end_headers()
-
- width, height = [ int(x) for x in default(body, 'size', '1024x1024').split('x') ] # ignore the restrictions on size
- response_format = default(body, 'response_format', 'url') # or b64_json
-
- payload = {
- 'prompt': body['prompt'], # ignore prompt limit of 1000 characters
- 'width': width,
- 'height': height,
- 'batch_size': default(body, 'n', 1) # ignore the batch limits of max 10
- }
-
- resp = {
- 'created': int(time.time()),
- 'data': []
- }
-
- # TODO: support SD_WEBUI_AUTH username:password pair.
- sd_url = f"{os.environ['SD_WEBUI_URL']}/sdapi/v1/txt2img"
-
- response = requests.post(url=sd_url, json=payload)
- r = response.json()
- # r['parameters']...
- for b64_json in r['images']:
- if response_format == 'b64_json':
- resp['data'].extend([{'b64_json': b64_json}])
- else:
- resp['data'].extend([{'url': f'data:image/png;base64,{b64_json}'}]) # yeah it's lazy. requests.get() will not work with this
-
- response = json.dumps(resp)
- self.wfile.write(response.encode('utf-8'))
- elif '/embeddings' in self.path and embedding_model is not None:
- self.send_response(200)
- self.send_header('Content-Type', 'application/json')
- self.end_headers()
-
- input = body['input'] if 'input' in body else body['text']
- if type(input) is str:
- input = [input]
-
- embeddings = embedding_model.encode(input).tolist()
-
- def enc_emb(emb):
- # If base64 is specified, encode. Otherwise, do nothing.
- if body.get("encoding_format", "") == "base64":
- return float_list_to_base64(emb)
- else:
- return emb
- data = [{"object": "embedding", "embedding": enc_emb(emb), "index": n} for n, emb in enumerate(embeddings)]
-
- response = json.dumps({
- "object": "list",
- "data": data,
- "model": st_model, # return the real model
- "usage": {
- "prompt_tokens": 0,
- "total_tokens": 0,
- }
- })
-
- if debug:
- print(f"Embeddings return size: {len(embeddings[0])}, number: {len(embeddings)}")
- self.wfile.write(response.encode('utf-8'))
- elif '/moderations' in self.path:
- # for now do nothing, just don't error.
- self.send_response(200)
- self.send_header('Content-Type', 'application/json')
- self.end_headers()
-
- response = json.dumps({
- "id": "modr-5MWoLO",
- "model": "text-moderation-001",
- "results": [{
- "categories": {
- "hate": False,
- "hate/threatening": False,
- "self-harm": False,
- "sexual": False,
- "sexual/minors": False,
- "violence": False,
- "violence/graphic": False
- },
- "category_scores": {
- "hate": 0.0,
- "hate/threatening": 0.0,
- "self-harm": 0.0,
- "sexual": 0.0,
- "sexual/minors": 0.0,
- "violence": 0.0,
- "violence/graphic": 0.0
- },
- "flagged": False
- }]
- })
- self.wfile.write(response.encode('utf-8'))
-
- elif self.path == '/api/v1/token-count':
- # NOT STANDARD. lifted from the api extension, but it's still very useful to calculate tokenized length client side.
- self.send_response(200)
- self.send_header('Content-Type', 'application/json')
- self.end_headers()
-
- tokens = encode(body['prompt'])[0]
- response = json.dumps({
- 'results': [{
- 'tokens': len(tokens)
- }]
- })
- self.wfile.write(response.encode('utf-8'))
- else:
- print(self.path, self.headers)
- self.send_error(404)
-
-
-def run_server():
- global embedding_model
- try:
- embedding_model = SentenceTransformer(st_model)
- print(f"\nLoaded embedding model: {st_model}, max sequence length: {embedding_model.max_seq_length}")
- except:
- print(f"\nFailed to load embedding model: {st_model}")
- pass
-
- server_addr = ('0.0.0.0' if shared.args.listen else '127.0.0.1', params['port'])
- server = ThreadingHTTPServer(server_addr, Handler)
- if shared.args.share:
- try:
- from flask_cloudflared import _run_cloudflared
- public_url = _run_cloudflared(params['port'], params['port'] + 1)
- print(f'Starting OpenAI compatible api at\nOPENAI_API_BASE={public_url}/v1')
- except ImportError:
- print('You should install flask_cloudflared manually')
- else:
- print(f'Starting OpenAI compatible api:\nOPENAI_API_BASE=http://{server_addr[0]}:{server_addr[1]}/v1')
-
- server.serve_forever()
-
-
-def setup():
- Thread(target=run_server, daemon=True).start()
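
`float_list_to_base64` in the extension above packs an embedding as raw float32 bytes and base64-encodes them, per its own comment about what the client expects. A standalone round-trip check, assuming only numpy; the helper names are illustrative rather than part of the extension:

```python
import base64
import numpy as np

def float_list_to_base64(float_list):
    # Same steps as the handler: float32 array -> raw bytes -> base64 -> ASCII string.
    return base64.b64encode(np.array(float_list, dtype="float32").tobytes()).decode("ascii")

def base64_to_float_list(encoded):
    # The inverse a client would apply to recover the embedding vector.
    return np.frombuffer(base64.b64decode(encoded), dtype="float32").tolist()

embedding = [0.25, -1.5, 3.0]
assert base64_to_float_list(float_list_to_base64(embedding)) == embedding
```
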
diff --git a/spaces/aodianyun/stable-diffusion-webui/modules/api/models.py b/spaces/aodianyun/stable-diffusion-webui/modules/api/models.py
deleted file mode 100644
index cba43d3b1807d547acda33256faf5db05dd216a6..0000000000000000000000000000000000000000
--- a/spaces/aodianyun/stable-diffusion-webui/modules/api/models.py
+++ /dev/null
@@ -1,269 +0,0 @@
-import inspect
-from pydantic import BaseModel, Field, create_model
-from typing import Any, Optional
-from typing_extensions import Literal
-from inflection import underscore
-from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
-from modules.shared import sd_upscalers, opts, parser
-from typing import Dict, List
-
-API_NOT_ALLOWED = [
- "self",
- "kwargs",
- "sd_model",
- "outpath_samples",
- "outpath_grids",
- "sampler_index",
- "do_not_save_samples",
- "do_not_save_grid",
- "extra_generation_params",
- "overlay_images",
- "do_not_reload_embeddings",
- "seed_enable_extras",
- "prompt_for_display",
- "sampler_noise_scheduler_override",
- "ddim_discretize"
-]
-
-class ModelDef(BaseModel):
- """Assistance Class for Pydantic Dynamic Model Generation"""
-
- field: str
- field_alias: str
- field_type: Any
- field_value: Any
- field_exclude: bool = False
-
-
-class PydanticModelGenerator:
- """
- Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about:
- source_data is a snapshot of the default values produced by the class
- params are the names of the actual keys required by __init__
- """
-
- def __init__(
- self,
- model_name: str = None,
- class_instance = None,
- additional_fields = None,
- ):
- def field_type_generator(k, v):
- # field_type = str if not overrides.get(k) else overrides[k]["type"]
- # print(k, v.annotation, v.default)
- field_type = v.annotation
-
- return Optional[field_type]
-
- def merge_class_params(class_):
- all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_)))
- parameters = {}
- for classes in all_classes:
- parameters = {**parameters, **inspect.signature(classes.__init__).parameters}
- return parameters
-
-
- self._model_name = model_name
- self._class_data = merge_class_params(class_instance)
-
- self._model_def = [
- ModelDef(
- field=underscore(k),
- field_alias=k,
- field_type=field_type_generator(k, v),
- field_value=v.default
- )
- for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED
- ]
-
- for fields in additional_fields:
- self._model_def.append(ModelDef(
- field=underscore(fields["key"]),
- field_alias=fields["key"],
- field_type=fields["type"],
- field_value=fields["default"],
- field_exclude=fields["exclude"] if "exclude" in fields else False))
-
- def generate_model(self):
- """
- Creates a pydantic BaseModel
- from the json and overrides provided at initialization
- """
- fields = {
- d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias, exclude=d.field_exclude)) for d in self._model_def
- }
- DynamicModel = create_model(self._model_name, **fields)
- DynamicModel.__config__.allow_population_by_field_name = True
- DynamicModel.__config__.allow_mutation = True
- return DynamicModel
-
-StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
- "StableDiffusionProcessingTxt2Img",
- StableDiffusionProcessingTxt2Img,
- [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}]
-).generate_model()
-
-StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
- "StableDiffusionProcessingImg2Img",
- StableDiffusionProcessingImg2Img,
- [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}]
-).generate_model()
-
-class TextToImageResponse(BaseModel):
- images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
- parameters: dict
- info: str
-
-class ImageToImageResponse(BaseModel):
- images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
- parameters: dict
- info: str
-
-class ExtrasBaseRequest(BaseModel):
- resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.")
- show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?")
- gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
- codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
- codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
- upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.")
- upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
- upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
- upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
- upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
- upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
- extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
- upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?")
-
-class ExtraBaseResponse(BaseModel):
- html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
-
-class ExtrasSingleImageRequest(ExtrasBaseRequest):
- image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
-
-class ExtrasSingleImageResponse(ExtraBaseResponse):
- image: str = Field(default=None, title="Image", description="The generated image in base64 format.")
-
-class FileData(BaseModel):
- data: str = Field(title="File data", description="Base64 representation of the file")
- name: str = Field(title="File name")
-
-class ExtrasBatchImagesRequest(ExtrasBaseRequest):
- imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
-
-class ExtrasBatchImagesResponse(ExtraBaseResponse):
- images: List[str] = Field(title="Images", description="The generated images in base64 format.")
-
-class PNGInfoRequest(BaseModel):
- image: str = Field(title="Image", description="The base64 encoded PNG image")
-
-class PNGInfoResponse(BaseModel):
- info: str = Field(title="Image info", description="A string with the parameters used to generate the image")
- items: dict = Field(title="Items", description="An object containing all the info the image had")
-
-class ProgressRequest(BaseModel):
- skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
-
-class ProgressResponse(BaseModel):
- progress: float = Field(title="Progress", description="The progress with a range of 0 to 1")
- eta_relative: float = Field(title="ETA in secs")
- state: dict = Field(title="State", description="The current state snapshot")
- current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
- textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.")
-
-class InterrogateRequest(BaseModel):
- image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
- model: str = Field(default="clip", title="Model", description="The interrogate model used.")
-
-class InterrogateResponse(BaseModel):
- caption: str = Field(default=None, title="Caption", description="The generated caption for the image.")
-
-class TrainResponse(BaseModel):
- info: str = Field(title="Train info", description="Response string from train embedding or hypernetwork task.")
-
-class CreateResponse(BaseModel):
- info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.")
-
-class PreprocessResponse(BaseModel):
- info: str = Field(title="Preprocess info", description="Response string from preprocessing task.")
-
-fields = {}
-for key, metadata in opts.data_labels.items():
- value = opts.data.get(key)
- optType = opts.typemap.get(type(metadata.default), type(value))
-
- if (metadata is not None):
- fields.update({key: (Optional[optType], Field(
- default=metadata.default ,description=metadata.label))})
- else:
- fields.update({key: (Optional[optType], Field())})
-
-OptionsModel = create_model("Options", **fields)
-
-flags = {}
-_options = vars(parser)['_option_string_actions']
-for key in _options:
- if(_options[key].dest != 'help'):
- flag = _options[key]
- _type = str
- if _options[key].default is not None: _type = type(_options[key].default)
- flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
-
-FlagsModel = create_model("Flags", **flags)
-
-class SamplerItem(BaseModel):
- name: str = Field(title="Name")
- aliases: List[str] = Field(title="Aliases")
- options: Dict[str, str] = Field(title="Options")
-
-class UpscalerItem(BaseModel):
- name: str = Field(title="Name")
- model_name: Optional[str] = Field(title="Model Name")
- model_path: Optional[str] = Field(title="Path")
- model_url: Optional[str] = Field(title="URL")
- scale: Optional[float] = Field(title="Scale")
-
-class SDModelItem(BaseModel):
- title: str = Field(title="Title")
- model_name: str = Field(title="Model Name")
- hash: Optional[str] = Field(title="Short hash")
- sha256: Optional[str] = Field(title="sha256 hash")
- filename: str = Field(title="Filename")
- config: Optional[str] = Field(title="Config file")
-
-class HypernetworkItem(BaseModel):
- name: str = Field(title="Name")
- path: Optional[str] = Field(title="Path")
-
-class FaceRestorerItem(BaseModel):
- name: str = Field(title="Name")
- cmd_dir: Optional[str] = Field(title="Path")
-
-class RealesrganItem(BaseModel):
- name: str = Field(title="Name")
- path: Optional[str] = Field(title="Path")
- scale: Optional[int] = Field(title="Scale")
-
-class PromptStyleItem(BaseModel):
- name: str = Field(title="Name")
- prompt: Optional[str] = Field(title="Prompt")
- negative_prompt: Optional[str] = Field(title="Negative Prompt")
-
-class ArtistItem(BaseModel):
- name: str = Field(title="Name")
- score: float = Field(title="Score")
- category: str = Field(title="Category")
-
-class EmbeddingItem(BaseModel):
- step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")
- sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available")
- sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead")
- shape: int = Field(title="Shape", description="The length of each individual vector in the embedding")
- vectors: int = Field(title="Vectors", description="The number of vectors in the embedding")
-
-class EmbeddingsResponse(BaseModel):
- loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
- skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
-
-class MemoryResponse(BaseModel):
- ram: dict = Field(title="RAM", description="System memory stats")
- cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats")
diff --git a/spaces/arnavkartikeya/SCRIPture-final/README.md b/spaces/arnavkartikeya/SCRIPture-final/README.md
deleted file mode 100644
index df43cb89f6d863c5def7c89e296043938af78631..0000000000000000000000000000000000000000
--- a/spaces/arnavkartikeya/SCRIPture-final/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: SCRIPture
-emoji: emoji
-colorFrom: Blue
-colorTo: Green
-sdk: gradio
-sdk_version: 3.9.0
-app_file: app.py
-pinned: false
----
diff --git a/spaces/arngpt/Summarizer-Trax/app.py b/spaces/arngpt/Summarizer-Trax/app.py
deleted file mode 100644
index 8157365f5b2fc53b87cfa6104bb1e56ebba37576..0000000000000000000000000000000000000000
--- a/spaces/arngpt/Summarizer-Trax/app.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import sys
-import os
-import gradio as gr
-import numpy as np
-
-import textwrap
-wrapper = textwrap.TextWrapper(width=70)
-
-import trax
-from trax import layers as tl
-from trax.fastmath import numpy as jnp
-
-# to print the entire np array
-np.set_printoptions(threshold=sys.maxsize)
-model = trax.models.TransformerLM(vocab_size=33000, d_model=512, d_ff=2048,n_layers=6, n_heads=8, max_len=4096, dropout=0.1,mode='eval', ff_activation=tl.Relu)
-
-model.init_from_file('model/model.pkl.gz',weights_only=True)
-
-def next_symbol(cur_output_tokens, model):
- """Returns the next symbol for a given sentence.
-
- Args:
- cur_output_tokens (list): tokenized sentence with EOS and PAD tokens at the end.
- model (trax.layers.combinators.Serial): The transformer model.
-
- Returns:
- int: tokenized symbol.
- """
- ### START CODE HERE (REPLACE INSTANCES OF 'None' WITH YOUR CODE) ###
-
- # current output tokens length
- token_length = len(cur_output_tokens)
- # calculate the minimum power of 2 big enough to store token_length
- # HINT: use np.ceil() and np.log2()
- # add 1 to token_length so np.log2() doesn't receive 0 when token_length is 0
- padded_length = 2**int(np.ceil(np.log2(token_length + 1)))
-
- # Fill cur_output_tokens with 0's until it reaches padded_length
- padded = cur_output_tokens + [0] * (padded_length - token_length)
- padded_with_batch = np.array(padded)[None, :] # Don't replace this None! This is a way of setting the batch dim
-
- # model expects a tuple containing two padded tensors (with batch)
- output, _ = model((padded_with_batch, padded_with_batch))
- # HINT: output has shape (1, padded_length, vocab_size)
- # To get log_probs you need to index output wih 0 in the first dim
- # token_length in the second dim and all of the entries for the last dim.
- log_probs = output[0, token_length, :]
-
- ### END CODE HERE ###
-
- return int(np.argmax(log_probs))
-
-def tokenize(input_str, EOS=1):
- """Input str to features dict, ready for inference"""
-
- # Use the trax.data.tokenize method. It takes streams and returns streams,
- # we get around it by making a 1-element stream with `iter`.
- inputs = next(trax.data.tokenize(iter([input_str]),
- vocab_dir='vocab_dir/',
- vocab_file='summarize32k.subword.subwords'))
-
- # Mark the end of the sentence with EOS
- return list(inputs) + [EOS]
-
-def detokenize(integers):
- """List of ints to str"""
-
- s = trax.data.detokenize(integers,
- vocab_dir='vocab_dir/',
- vocab_file='summarize32k.subword.subwords')
-
- return wrapper.fill(s)
-
-def greedy_decode(input_sentence, model, next_symbol=next_symbol, tokenize=tokenize, detokenize=detokenize):
- """Greedy decode function.
-
- Args:
- input_sentence (string): a sentence or article.
- model (trax.layers.combinators.Serial): Transformer model.
-
- Returns:
- string: summary of the input.
- """
-
- ### START CODE HERE (REPLACE INSTANCES OF 'None' WITH YOUR CODE) ###
- # Use tokenize()
- cur_output_tokens = tokenize(input_sentence) + [0]
- generated_output = []
- cur_output = 0
- EOS = 1
-
- while cur_output != EOS:
- # Get next symbol
- cur_output = next_symbol(cur_output_tokens, model)
- # Append next symbol to original sentence
- cur_output_tokens.append(cur_output)
- # Append next symbol to generated sentence
- generated_output.append(cur_output)
-
- #print(detokenize(generated_output))
-
- ### END CODE HERE ###
-
- return detokenize(generated_output)
-
-#test_sentence = "It was a sunny day when I went to the market to buy some flowers. But I only found roses, not tulips."
-
-
-
-def summarizer(text):
- output=greedy_decode(text,model)
- output=output[1:-5]
- return output
-
-gr.Interface(fn=summarizer, inputs=["text"], outputs=[ "text"]).launch()
\ No newline at end of file
diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/bark/hubert/kmeans_hubert.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/bark/hubert/kmeans_hubert.py
deleted file mode 100644
index a6a3b9aeb1111ca0abeccb6142007ecc5b39d78d..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/bark/hubert/kmeans_hubert.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""
-Modified HuBERT model without kmeans.
-Original author: https://github.com/lucidrains/
-Modified by: https://www.github.com/gitmylo/
-License: MIT
-"""
-
-# Modified code from https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/hubert_kmeans.py
-
-import logging
-from pathlib import Path
-
-import torch
-from einops import pack, unpack
-from torch import nn
-from torchaudio.functional import resample
-from transformers import HubertModel
-
-
-def round_down_nearest_multiple(num, divisor):
- return num // divisor * divisor
-
-
-def curtail_to_multiple(t, mult, from_left=False):
- data_len = t.shape[-1]
- rounded_seq_len = round_down_nearest_multiple(data_len, mult)
- seq_slice = slice(None, rounded_seq_len) if not from_left else slice(-rounded_seq_len, None)
- return t[..., seq_slice]
-
-
-def exists(val):
- return val is not None
-
-
-def default(val, d):
- return val if exists(val) else d
-
-
-class CustomHubert(nn.Module):
- """
- checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
- or you can train your own
- """
-
- def __init__(self, checkpoint_path, target_sample_hz=16000, seq_len_multiple_of=None, output_layer=9, device=None):
- super().__init__()
- self.target_sample_hz = target_sample_hz
- self.seq_len_multiple_of = seq_len_multiple_of
- self.output_layer = output_layer
- if device is not None:
- self.to(device)
- self.model = HubertModel.from_pretrained("facebook/hubert-base-ls960")
- if device is not None:
- self.model.to(device)
- self.model.eval()
-
- @property
- def groups(self):
- return 1
-
- @torch.no_grad()
- def forward(self, wav_input, flatten=True, input_sample_hz=None):
- device = wav_input.device
-
- if exists(input_sample_hz):
- wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
-
- if exists(self.seq_len_multiple_of):
- wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of)
-
- outputs = self.model.forward(
- wav_input,
- output_hidden_states=True,
- )
- embed = outputs["hidden_states"][self.output_layer]
- embed, packed_shape = pack([embed], "* d")
- codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device)
- if flatten:
- return codebook_indices
-
- (codebook_indices,) = unpack(codebook_indices, packed_shape, "*")
- return codebook_indices
diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/models/melgan_generator.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/models/melgan_generator.py
deleted file mode 100644
index 989797f0b8537c96ff4cfd1c2c6af856cecc79fc..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/TTS/vocoder/models/melgan_generator.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import torch
-from torch import nn
-from torch.nn.utils import weight_norm
-
-from TTS.utils.io import load_fsspec
-from TTS.vocoder.layers.melgan import ResidualStack
-
-
-class MelganGenerator(nn.Module):
- def __init__(
- self,
- in_channels=80,
- out_channels=1,
- proj_kernel=7,
- base_channels=512,
- upsample_factors=(8, 8, 2, 2),
- res_kernel=3,
- num_res_blocks=3,
- ):
- super().__init__()
-
- # assert model parameters
- assert (proj_kernel - 1) % 2 == 0, " [!] proj_kernel should be an odd number."
-
- # setup additional model parameters
- base_padding = (proj_kernel - 1) // 2
- act_slope = 0.2
- self.inference_padding = 2
-
- # initial layer
- layers = []
- layers += [
- nn.ReflectionPad1d(base_padding),
- weight_norm(nn.Conv1d(in_channels, base_channels, kernel_size=proj_kernel, stride=1, bias=True)),
- ]
-
- # upsampling layers and residual stacks
- for idx, upsample_factor in enumerate(upsample_factors):
- layer_in_channels = base_channels // (2**idx)
- layer_out_channels = base_channels // (2 ** (idx + 1))
- layer_filter_size = upsample_factor * 2
- layer_stride = upsample_factor
- layer_output_padding = upsample_factor % 2
- layer_padding = upsample_factor // 2 + layer_output_padding
- layers += [
- nn.LeakyReLU(act_slope),
- weight_norm(
- nn.ConvTranspose1d(
- layer_in_channels,
- layer_out_channels,
- layer_filter_size,
- stride=layer_stride,
- padding=layer_padding,
- output_padding=layer_output_padding,
- bias=True,
- )
- ),
- ResidualStack(channels=layer_out_channels, num_res_blocks=num_res_blocks, kernel_size=res_kernel),
- ]
-
- layers += [nn.LeakyReLU(act_slope)]
-
- # final layer
- layers += [
- nn.ReflectionPad1d(base_padding),
- weight_norm(nn.Conv1d(layer_out_channels, out_channels, proj_kernel, stride=1, bias=True)),
- nn.Tanh(),
- ]
- self.layers = nn.Sequential(*layers)
-
- def forward(self, c):
- return self.layers(c)
-
- def inference(self, c):
- c = c.to(self.layers[1].weight.device)
- c = torch.nn.functional.pad(c, (self.inference_padding, self.inference_padding), "replicate")
- return self.layers(c)
-
- def remove_weight_norm(self):
- for _, layer in enumerate(self.layers):
- if len(layer.state_dict()) != 0:
- try:
- nn.utils.remove_weight_norm(layer)
- except ValueError:
- layer.remove_weight_norm()
-
- def load_checkpoint(
- self, config, checkpoint_path, eval=False, cache=False
- ): # pylint: disable=unused-argument, redefined-builtin
- state = load_fsspec(checkpoint_path, map_location=torch.device("cpu"), cache=cache)
- self.load_state_dict(state["model"])
- if eval:
- self.eval()
- assert not self.training
- self.remove_weight_norm()
diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests2/test_glow_tts_d-vectors_train.py b/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests2/test_glow_tts_d-vectors_train.py
deleted file mode 100644
index f1cfd4368f9a0658e6b94ad9fc9697ba75f30fed..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests2/test_glow_tts_d-vectors_train.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import glob
-import json
-import os
-import shutil
-
-from trainer import get_last_checkpoint
-
-from tests import get_device_id, get_tests_output_path, run_cli
-from TTS.tts.configs.glow_tts_config import GlowTTSConfig
-
-config_path = os.path.join(get_tests_output_path(), "test_model_config.json")
-output_path = os.path.join(get_tests_output_path(), "train_outputs")
-
-
-config = GlowTTSConfig(
- batch_size=2,
- eval_batch_size=8,
- num_loader_workers=0,
- num_eval_loader_workers=0,
- text_cleaner="english_cleaners",
- use_phonemes=True,
- phoneme_language="en-us",
- phoneme_cache_path="tests/data/ljspeech/phoneme_cache/",
- run_eval=True,
- test_delay_epochs=-1,
- epochs=1,
- print_step=1,
- print_eval=True,
- test_sentences=[
- "Be a voice, not an echo.",
- ],
- data_dep_init_steps=1.0,
- use_speaker_embedding=False,
- use_d_vector_file=True,
- d_vector_file="tests/data/ljspeech/speakers.json",
- d_vector_dim=256,
-)
-config.audio.do_trim_silence = True
-config.audio.trim_db = 60
-config.save_json(config_path)
-
-# train the model for one epoch
-command_train = (
- f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path {config_path} "
- f"--coqpit.output_path {output_path} "
- "--coqpit.datasets.0.formatter ljspeech_test "
- "--coqpit.datasets.0.meta_file_train metadata.csv "
- "--coqpit.datasets.0.meta_file_val metadata.csv "
- "--coqpit.datasets.0.path tests/data/ljspeech "
- "--coqpit.datasets.0.meta_file_attn_mask tests/data/ljspeech/metadata_attn_mask.txt "
- "--coqpit.test_delay_epochs 0"
-)
-run_cli(command_train)
-
-# Find latest folder
-continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
-
-# Inference using TTS API
-continue_config_path = os.path.join(continue_path, "config.json")
-continue_restore_path, _ = get_last_checkpoint(continue_path)
-out_wav_path = os.path.join(get_tests_output_path(), "output.wav")
-speaker_id = "ljspeech-1"
-continue_speakers_path = config.d_vector_file
-
-# Check integrity of the config
-with open(continue_config_path, "r", encoding="utf-8") as f:
- config_loaded = json.load(f)
-assert config_loaded["characters"] is not None
-assert config_loaded["output_path"] in continue_path
-assert config_loaded["test_delay_epochs"] == 0
-
-# Load the model and run inference
-inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
-run_cli(inference_command)
-
-# restore the model and continue training for one more epoch
-command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} "
-run_cli(command_train)
-shutil.rmtree(continue_path)
diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/vc_tests/__init__.py b/spaces/artificialguybr/video-dubbing/TTS/tests/vc_tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/autotrain-projects/llm-merge-adapter/README.md b/spaces/autotrain-projects/llm-merge-adapter/README.md
deleted file mode 100644
index 08df6d1f0969363e9cf0dd37df8b23c8a5aa5a90..0000000000000000000000000000000000000000
--- a/spaces/autotrain-projects/llm-merge-adapter/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: LLM Merge Adapter
-emoji: 🐢
-colorFrom: indigo
-colorTo: blue
-sdk: gradio
-sdk_version: 3.50.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/DockerGoFlanT5/main.py b/spaces/awacke1/DockerGoFlanT5/main.py
deleted file mode 100644
index 5000cdaca953ce182c37c955940b38296c401d21..0000000000000000000000000000000000000000
--- a/spaces/awacke1/DockerGoFlanT5/main.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from fastapi import FastAPI
-from fastapi.staticfiles import StaticFiles
-from fastapi.responses import FileResponse
-
-from transformers import pipeline
-
-app = FastAPI()
-
-pipe_flan = pipeline("text2text-generation", model="google/flan-t5-small")
-
-@app.get("/infer_t5")
-def t5(input):
- output = pipe_flan(input)
- return {"output": output[0]["generated_text"]}
-
-app.mount("/", StaticFiles(directory="static", html=True), name="static")
-
-@app.get("/")
-def index() -> FileResponse:
- return FileResponse(path="/app/static/index.html", media_type="text/html")
diff --git a/spaces/awacke1/Examples-Of-AI-0302/app.py b/spaces/awacke1/Examples-Of-AI-0302/app.py
deleted file mode 100644
index 1d37e1ba5cdbf6b844bbc2fd0e3b209c2a66fc63..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Examples-Of-AI-0302/app.py
+++ /dev/null
@@ -1,856 +0,0 @@
-import streamlit as st
-from graphviz import Digraph
-
-
-st.markdown("""
-# 👋 Two easy ways to turbo boost your AI learning journey! 💻
-# 🌐 AI Pair Programming
-## Open 2 Browsers to:
-1. __🌐 ChatGPT__ [URL](https://chat.openai.com/chat) or [URL2](https://platform.openai.com/playground) and
-2. __🌐 Huggingface__ [URL](https://huggingface.co/awacke1) in separate browser windows.
-1. 🤖 Use prompts to generate a streamlit program on Huggingface or locally to test it.
-2. 🔧 For advanced work, add Python 3.10 and VSCode locally, and debug as gradio or streamlit apps.
-3. 🚀 Use these two superpower processes to reduce the time it takes you to make a new AI program! ⏱️
-# 🎥 YouTube University Method:
-1. 🏋️♀️ Plan two hours each weekday to exercise your body and brain.
-2. 🎬 Make a playlist of videos you want to learn from on YouTube. Save the links to edit later.
-3. 🚀 Try watching the videos at a faster speed while exercising, and sample the first five minutes of each video.
-4. 📜 Reorder the playlist so the most useful videos are at the front, and take breaks to exercise.
-5. 📝 Practice note-taking in markdown to instantly save what you want to remember. Share your notes with others!
-6. 👥 AI Pair Programming Using Long Answer Language Models with Human Feedback:
-## 🎥 2023 AI/ML Advanced Learning Playlists:
-1. [2023 QA Models and Long Form Question Answering NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFovrkkx8HMTLNgYdjCMNYmX_)
-2. [FHIR Bioinformatics Development Using AI/ML and Python, Streamlit, and Gradio - 2022](https://www.youtube.com/playlist?list=PLHgX2IExbFovoMUC3hYXeFegpk_Y0Lz0Q)
-3. [2023 ChatGPT for Coding Assistant Streamlit, Gradio and Python Apps](https://www.youtube.com/playlist?list=PLHgX2IExbFouOEnppexiKZVdz_k5b0pvI)
-4. [2023 BigScience Bloom - Large Language Model for AI Systems and NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFouqnsIqziThlPCX_miiDq14)
-5. [2023 Streamlit Pro Tips for AI UI UX for Data Science, Engineering, and Mathematics](https://www.youtube.com/playlist?list=PLHgX2IExbFou3cP19hHO9Xb-cN8uwr5RM)
-6. [2023 Fun, New and Interesting AI, Videos, and AI/ML Techniques](https://www.youtube.com/playlist?list=PLHgX2IExbFotoMt32SrT3Xynt5BXTGnEP)
-7. [2023 Best Minds in AGI AI Gamification and Large Language Models](https://www.youtube.com/playlist?list=PLHgX2IExbFotmFeBTpyje1uI22n0GAkXT)
-8. [2023 State of the Art for Vision Image Classification, Text Classification and Regression, Extractive Question Answering and Tabular Classification](https://www.youtube.com/playlist?list=PLHgX2IExbFotPcPu6pauNHOoZTTbnAQ2F)
-9. [2023 AutoML DataRobot and AI Platforms for Building Models, Features, Test, and Transparency](https://www.youtube.com/playlist?list=PLHgX2IExbFovsY2oGbDwdEhPrakkC8i3g)
-""")
-
-
-st.markdown("""
-# Cognitive AI with Human Feedback (CAHF) [Example 🩺⚕️](https://huggingface.co/spaces/awacke1/Cognitive-AI-Episodic-Semantic-Memory-Demo):
-1. Create and use models to predict __outcomes__.
-2. Use AI with **explainability** to predict **conditions, diseases, and opportunities**.
-3. **Cognitive AI** - mimics how humans reason through decision-making processes.
-4. **Reasoning cycles** - "Recommended for You" reasoners consider each user's personalized needs and classification in order to recommend products.
-5. **High Acuity Reasoners** - make decisions within rules of **what they can and cannot do under human feedback** guidelines.
-   - Emphasize **explainability, transparency, and removing administrative burden** to **protocolize** and improve what staff are doing.
-   - Vetted by SMEs, they add the value of **judgement and training** and pick up intelligence and **skills from human feedback**.
-   - Surface **Alerts, Recommended Actions, and Clinical Terms** per entity, with vocabularies from LOINC, SNOMED, OMS, ICD10, RXNORM, SMILES, HCPCS, CPT, CQM, HL7, SDC and FHIR.
-6. A non-static, multi-agent cognitive approach uses real-time series data to identify factors predictive of outcomes.
-7. Cognitive models are a form of ontology - computable sets and relationships stored in the ontology and then ingested by a reasoner.
-   - Use models of the world to build predictions and recommendations, with answers that accumulate the information we already know.
-8. Reasoners standardize decisions, making it as easy as possible to do the right thing, using transfer learning and recommendation tools with questions and actions.
-""")
-
-
-st.markdown("""
-# 📚 Clinical Terminology and Ontologies [Example 🩺⚕️NLP Clinical Ontology Biomedical NER](https://huggingface.co/spaces/awacke1/Biomed-NLP-AI-Clinical-Terminology)
-## Health Vocabularies, Systems of Coding, and Databases with Bibliographies
-## __Keywords__:
-1. __Clinical Terminology__: 💬 Words that doctors use to talk to each other about patients.
-2. __Ontologies for Medications and Conditions__: 📚 A fancy way of organizing knowledge about medicine and health problems.
-3. __Health Vocabularies__: 📝 A special list of words used in healthcare to talk about health issues.
-4. __Systems of Coding__: 💻 A way of giving things like sicknesses and treatments special codes, so that doctors can remember them easily.
-5. __Databases__: 🗄️ A computer system that stores information about patients, health research, and other healthcare things.
-6. __Bibliographies__: 📖 A list of books or articles that doctors use to learn about new health information.
-1. ## 1️⃣ National Library of Medicine's **RxNorm**:
- - Standardized nomenclature for clinical drugs developed by NLM
- - Provides links between drug names and related information such as ingredients, strengths, and dosages
- - **Data type: controlled vocabulary**
- - Access through **NLM's RxNorm website**: https://www.nlm.nih.gov/research/umls/rxnorm/index.html
-2. ## 2️⃣ Centers for Medicare and Medicaid Services' Healthcare Common Procedure Coding System (HCPCS):
- - Coding system used to identify healthcare **services, procedures, and supplies**
- - Includes **codes for drugs, biologicals, and other items** used in medical care
- - **Data type: coding system**
- - Access through **CMS website**: https://www.cms.gov/Medicare/Coding/MedHCPCSGenInfo
-3. ## 3️⃣ Unified Medical Language System (UMLS):
- - Set of files and software tools developed by NLM for integrating and mapping biomedical vocabularies
- - Includes RxNorm and other drug vocabularies, as well as other terminologies used in medicine
- - **Data type: controlled vocabulary**
- - Access through UMLS Metathesaurus: https://www.nlm.nih.gov/research/umls/index.html
-4. ## 4️⃣ PubMed:
- - Database of **biomedical literature** maintained by the National Center for Biotechnology Information (NCBI)
- - Includes information about **drugs, including drug names, chemical structures, and pharmacological actions**
- - **Data type: bibliographic database**
- - Access through **PubMed website**: https://pubmed.ncbi.nlm.nih.gov/
-5. ## 5️⃣ PubChem:
- - Database of chemical substances maintained by NCBI
- - Includes information about drugs, including **chemical structures, properties, and activities**
- - **Data type: chemical database**
- - Access through **PubChem website**: https://pubchem.ncbi.nlm.nih.gov/
-6. ## 6️⃣ Behavioral Health Code Terminology Sets:
- - Code terminology sets specific to behavioral health
- - Includes **DSM** published by American Psychiatric Association, **ICD** published by World Health Organization, and **CPT** published by American Medical Association
- - **Data type: coding system**
- - Access through respective **organizations' websites**:
- 1. [DSM](https://www.psychiatry.org/psychiatrists/practice/dsm)
- 2. [ICD](https://www.who.int/standards/classifications/classification-of-diseases)
- 3. [CPT](https://www.ama-assn.org/practice-management/cpt/current-procedural-terminology-cpt)
-""")
-
-st.markdown("""
-1. # 📚Natural Language Processing🔤 - 🗣️🤖💭💬🌍🔍
- 1. 🤔 **🩺⚕️ Sentiment analysis** - Determine underlying sentiment of text. [Example](https://huggingface.co/spaces/awacke1/Sentiment-analysis-streamlit)
- 2. 📝 **Named Entity Recognition (NER)** - Identify and classify named entities in text. [Example](https://huggingface.co/spaces/awacke1/Named-entity-resolution)
- 3. 🔊 **🩺⚕️Automatic Speech Recognition (ASR)** - Transcribe spoken language into text.
- # Advanced NLP ASR Examples:
- 1. 🩺⚕️ https://huggingface.co/spaces/awacke1/ASR-High-Accuracy-Test
- 2. https://huggingface.co/spaces/awacke1/ASRGenerateStory
- 3. 🩺⚕️ https://huggingface.co/spaces/awacke1/TTS-STT-Blocks
- 4. 🩺⚕️ https://huggingface.co/spaces/awacke1/CloneAnyVoice
- 5. https://huggingface.co/spaces/awacke1/ASR-SOTA-NvidiaSTTMozilla
- 4. 🌐 **Machine translation** - Translate text between languages automatically. [Example](https://huggingface.co/spaces/awacke1/Machine-translation)
- 5. 📄 **Text summarization** - Automatically summarize large volumes of text. [Example](https://huggingface.co/spaces/awacke1/Text-summarization)
- 6. ❓ **🩺⚕️ Question answering** - Answer questions posed in natural language. [Example](https://huggingface.co/spaces/awacke1/Question-answering)
- 7. 🤖 **Sentiment-aware chatbots** - Use sentiment analysis to detect user emotions and respond appropriately.
- 8. 📊 **🩺⚕️ Text classification** - Classify text into different categories. [Example](https://huggingface.co/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli)
- 9. 💬 **🩺⚕️ Text generation** - Generate natural language text. [Example](https://huggingface.co/spaces/awacke1/Sentence2Paragraph)
- 10. 🔎 **Topic modeling** - Automatically identify topics in a large corpus of text. [Example](https://huggingface.co/spaces/awacke1/Topic-modeling)
- - Examples
- 1. [NLP Video Summary](https://huggingface.co/spaces/awacke1/Video-Summary)
- 2. [TTS-STT ASR with Multiple Voices](https://huggingface.co/spaces/awacke1/TTS-STT-Blocks)
- 3. [NLP Transcript with Video Player](https://huggingface.co/spaces/awacke1/Streamlit-ASR-Video)
- 4. [NLP Clinical Ontology Biomedical NER](https://huggingface.co/spaces/awacke1/Biomed-NLP-AI-Clinical-Terminology)
- 5. [Document Understanding and NLP](https://huggingface.co/spaces/awacke1/AIDocumentUnderstandingOCR)
- 6. [NLP ASR Wav2Vec2 Multilingual](https://huggingface.co/spaces/awacke1/ASR-High-Accuracy-Test)
- 7. [Live ASR](https://huggingface.co/spaces/awacke1/ASR-SOTA-NvidiaSTTMozilla)
- 8. [NLP and Visualization](https://huggingface.co/spaces/awacke1/Visualization-Plotly-Sunbursts-Treemaps-and-WebGL)
-""")
-
-st.markdown("""
-2. # 🔮Generative AI💭 (🎨Images and 📝Text) - 🎵🧩🔄📊🌌
- 1. 🆕 **🩺⚕️ Generation of new data**: Create new data that resembles existing data. [Example](https://huggingface.co/spaces/awacke1/GenAI-Generate-New-Data-Resembling-Example)
- 2. 🎨 **Creative potential**: Generate music, art, or literature. [Example](https://huggingface.co/spaces/awacke1/Creative-Potential-Music-Art-Lit)
- 3. 📊 **Data synthesis**: Synthesize data from multiple sources to create new datasets. [Example](https://huggingface.co/spaces/awacke1/Data-Synthesizer-Synthesize-From-Multiple-Sources)
- 4. 📈 **🩺⚕️ Data augmentation**: Augment existing datasets to make them larger and more diverse. [Example](https://huggingface.co/spaces/awacke1/Data-Augmentation)
- 5. 🔀 **Domain transfer**: Transfer knowledge learned from one domain to another.
- 6. 🔍 **Unsupervised learning**: Learn patterns without labeled training data.
- 7. 🔄 **Adaptive learning**: Adapt to changes in data over time.
- 8. 🔊 **Noise injection**: Introduce noise to explore a wider range of possibilities.
- 9. 🕶️ **Latent space manipulation**: Control output by manipulating a model's latent space.
- 10. 🖼️ **Realistic output**: Produce output that is difficult to distinguish from human-created data.
- - Examples
- 1. Quantum AI Circuits: https://huggingface.co/spaces/awacke1/AI-Quantum?option=Circuit
- 2. Generate Story and Video: https://huggingface.co/spaces/awacke1/ASRGenerateStoryandVideo
- 3. ASR Generate Story: https://huggingface.co/spaces/awacke1/ASRGenerateStory
- 4. Music Generation: https://huggingface.co/spaces/awacke1/MusicMaker
-""")
-
-st.markdown("""
-3. # 📷Image Recognition🏞️
- 1. 📷 **Object detection**: Detect and identify multiple objects in an image for detailed analysis and classification.
- 2. 🏞️ **Scene recognition**: Recognize and classify entire scenes based on objects, colors, and shapes.
- 3. 😃 **Facial recognition**: Analyze facial features for accurate identification.
- 4. 😊 **Emotion recognition**: Identify emotions on a subject's face, including happiness, sadness, and anger.
- 5. 🔤 **Text recognition**: Identify and translate text in images for analysis.
- 6. 🎨 **Color recognition**: Detect colors and provide information on hue, saturation, and brightness.
- 7. 🔍 **Image segmentation**: Divide an image into multiple regions for individual analysis and classification.
- 8. 🌅 **Image restoration**: Remove noise and blur, restoring images to original clarity and quality.
- 9. 🔖 **Image classification**: Classify images into categories like animals, buildings, or landscapes.
- 10. 🎨 **Style transfer**: Apply the style of one image to another for unique and innovative results.
- - Examples
- 1. 🩺⚕️ Text-to-Image : [Image Classification](https://huggingface.co/spaces/awacke1/Prompt-Refinery-Text-to-Image-Generation)
- 2. Image Captions from 5 SOTA Generators: [URL](https://huggingface.co/spaces/awacke1/ImageCaptionPromptGenerator)
- 3. 🩺⚕️ Image to Multilingual OCR: [URL](https://huggingface.co/spaces/awacke1/Image-to-Multilingual-OCR)
- 4. WRN - Wide Residual Networks: [URL](https://huggingface.co/spaces/awacke1/ResnetPytorchImageRecognition)
- 5. AI Document Understanding: [URL](https://huggingface.co/spaces/awacke1/AIDocumentUnderstandingOCR)
- 6. Elixir Docker Bumblebee: [URL](https://huggingface.co/spaces/awacke1/DockerImageRecognitionToText)
- 7. Speech to Text to Story to Images to Video: [URL](https://huggingface.co/spaces/awacke1/Speeech2Text2Story2Images2Video)
- 8. Image to Line Drawings: [URL](https://huggingface.co/spaces/awacke1/Image-to-Line-Drawings)
- 9. Semantic Image Search: [URL](https://huggingface.co/spaces/awacke1/Image-Semantic-Search)
- 10. Zoom Clip Toon: [URL](https://huggingface.co/spaces/awacke1/Zoom-Clip-Toon-Image-to-Image)
- 11. Image to Reading Labels: [URL](https://huggingface.co/spaces/awacke1/ImageOCRMultilingual)
- 12. A Game For That - Gamification Using Snapshot Images: [URL](https://huggingface.co/spaces/awacke1/AGameForThat)
- 13. AI Visually Plays QBert, Pong, Seaquest and more: [URL](https://huggingface.co/spaces/awacke1/AI-Atari-Live-Streamlit)
- 14. AI Creates Generator Style Mix Art from Encyclopedia: [URL](https://huggingface.co/spaces/awacke1/Art-Generator-and-Style-Mixer)
- 15. BigGAN Image Gen and Search: [URL](https://huggingface.co/spaces/awacke1/AI-BigGAN-Image-Gen)
- 16. Art Style Line Drawings: [URL](https://huggingface.co/spaces/awacke1/ArtStyleFoodsandNutrition)
- 17. 🩺⚕️ Yolo Real Time Image Recognition from Webcam: https://huggingface.co/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco
-""")
-
-st.markdown("""
-4. # 🗣️Speech Recognition💬
- 1. 🔊 **Continuous Speech Recognition**: Transcribe spoken words in real-time without pausing.
- 2. 🗣️ **Speaker Identification**: Identify individual speakers through unique features in their speech.
- 3. 🧠 **Contextual Awareness**: Understand conversation context and interpret word meaning.
- 4. 🌎 **Multilingual Support**: Recognize and transcribe multiple languages for translation.
- 5. 🔇 **Noise Reduction**: Filter out background noise to improve transcription quality.
- 6. 🔒 **Voice Biometrics**: Verify speaker identity and provide secure access to personal data.
- 7. 🎛️ **Command and Control**: Interpret voice commands to automate tasks and interact with software.
- 8. 💬 **Natural Language Processing**: Understand complex human speech patterns.
- 9. 🧠 **Adaptive Learning**: Learn and adapt to improve accuracy over time.
- 10. ☁️ **Cloud-Based Deployment**: Real-time processing of large amounts of data, even on mobile devices.
-""")
-
-st.markdown("""
-5. # Reinforcement Learning
- 1. 🏆 **Reward-driven**: RL uses rewards or punishments to drive its learning process.
- 2. 🧪 **Trial-and-error learning**: RL is a trial-and-error learning method, where an agent tries different actions to find the best action that will maximize the cumulative reward.
- 3. 🤔 **Exploration-exploitation trade-off**: RL agents need to balance exploration and exploitation to find new possibilities while also exploiting successful actions.
- 4. 📈 **Markov Decision Processes**: RL uses MDPs to model decision-making processes.
- 5. 📊 **Policy optimization**: RL uses policy optimization techniques to find the best policy for a given task or learn the optimal policy from scratch.
- 6. 💰 **Value-based methods**: RL uses value-based methods to estimate the value of each state or action.
- 7. 🧠 **Model-based methods**: RL can use model-based methods to predict the outcomes of different actions.
- 8. 🤖 **Deep Reinforcement Learning**: DRL combines RL with deep learning techniques to learn complex decision-making tasks.
- 9. 🔄 **Transfer learning**: RL can use transfer learning techniques to transfer knowledge learned in one task to another task.
- 10. 🤝 **Multi-agent RL**: RL can handle multiple agents that interact with each other.
-""")
-
-st.markdown("""
-6. 🎲Game Theory🎲 – Traditional AI processes
- 1. 🤝 **Interdependence**: Game Theory considers decision-making among multiple agents, unlike traditional AI processes which focus on a single agent.
- 2. 🎯 **Strategic Behavior**: Game Theory assumes that agents aim to maximize their payoffs based on the actions of other agents. Traditional AI may not consider this strategic element.
- 3. 💰 **Payoffs**: Game Theory calculates payoffs for each agent based on their actions and the actions of other agents, unlike traditional AI which may focus on a single objective.
- 4. ⚖️ **Equilibrium**: Game Theory seeks to identify stable states in the game where no agent has an incentive to deviate from their current strategy. Traditional AI may not seek to find an equilibrium.
- 5. 🎲 **Game Formulation**: Game Theory formulates a game, including rules, players, and possible actions, unlike traditional AI which may not require such formulation.
- 6. 💡 **Solution Concepts**: Game Theory has various solution concepts, such as Nash Equilibrium and Pareto Efficiency, to identify the most desirable outcomes. Traditional AI may not have such concepts.
- 7. 📊 **Information**: Game Theory considers the information available to each agent in the game. Traditional AI may not consider information explicitly.
- 8. ⚔️ **Adversarial**: Game Theory models adversarial scenarios where agents have conflicting goals. Traditional AI may assume cooperation among agents.
- 9. ❓ **Uncertainty**: Game Theory deals with uncertainty and incomplete information in the game. Traditional AI may not consider uncertainty.
- 10. 🌐 **Complexity**: Game Theory deals with complex multi-agent interactions. Traditional AI may focus on single-agent optimization.
- - Examples
- 1. 🩺⚕️ Health Care Game: https://huggingface.co/spaces/awacke1/AI-RPG-Self-Play-RLML-Health-Battler-Game
- 2. 🩺⚕️ Sankey Snacks Math Chart Animator: https://huggingface.co/spaces/awacke1/Sankey-Snacks
- 3. Blackjack 21 : https://huggingface.co/spaces/awacke1/BlackjackSimulatorCardGameAI
- 4. Player Card Monster Battler: https://huggingface.co/spaces/awacke1/Player-Card-Monster-Battler-For-Math-and-AI
- 5. Emojitrition: https://huggingface.co/spaces/awacke1/Emojitrition-Fun-and-Easy-Nutrition
-""")
-
-st.markdown("""
-7. # 🃏Card Game🃏 Activity
- 1. 🃏 **Card crafting**: Combine existing cards or materials to craft custom cards. [Example](https://huggingface.co/spaces/awacke1/CardCrafter-CraftCustomCards)
- 2. 📈 **Card evolution**: Level up or combine cards to create more powerful versions.
- 3. 🔨 **Deck building**: Build custom decks that match your play style.
- 4. ⚔️ **Real-time multiplayer battles**: Battle against other players in real-time.
- 5. 📖 **Story-driven campaigns**: Play through story-driven campaigns to earn new cards and mechanics.
- 6. 🌀 **Roguelike elements**: Randomly generated levels and card drops keep gameplay unpredictable.
- 7. 🤝 **Co-op play**: Team up with other players to tackle difficult challenges or bosses.
- 8. 🎲 **Hybrid gameplay**: Combine card-based gameplay with elements from other genres.
- 9. 💥 **Multi-card play**: Use multiple cards at once to create powerful combos or synergies.
- 10. 🗺️ **Tactical positioning**: Strategically place your cards on a game board or battlefield to gain an advantage.
- - Examples
- 1. 🩺⚕️ Game Activity Graph: https://huggingface.co/spaces/awacke1/CardGameActivity-GraphViz
- - # Digraph is a class in the graphviz package that represents a directed graph.
- 1. It is used to create graphs with nodes and edges.
- 2. It can be customized with various styles and formatting options.
-    3. An example of defining a Digraph with emoji node labels is sketched just after this list.
- 2. 🩺⚕️ SVG Card Generation: https://huggingface.co/spaces/awacke1/VizLib-SVGWrite-Streamlit
- - # Scalable Vector Graphics (SVG) is an important language used in UI and graphic design.
- 3. Game Mechanics Top 20: https://huggingface.co/spaces/awacke1/CardGameMechanics
- 4. Game Mechanics Deep Dive: https://huggingface.co/spaces/awacke1/CardGameActivity
- 5. Hexagon Dice: https://huggingface.co/spaces/awacke1/Hexagon-Dice-Fractal-Math-Game
- 6. Dice Roll Game: https://huggingface.co/spaces/awacke1/Dice-Roll-Fractals-STEM-Math
- 7. Pyplot Dice Game: https://huggingface.co/spaces/awacke1/Streamlit-Pyplot-Math-Dice-Game
-""")
-
-
-st.markdown("""
-## AI For Long Question Answering and Fact Checking [Example](🩺⚕️ https://huggingface.co/spaces/awacke1/StreamlitWikipediaChat)
-1. 🖥️ First, we'll teach a smart computer to browse the internet and find information.
- - 🧠 It will be like having a super-smart search engine!
-2. 🤖 Then, we'll train the computer to answer questions by having it learn from how humans answer questions.
- - 🤝 We'll teach it to imitate how people find and use information on the internet.
-3. 📚 To make sure the computer's answers are correct, we'll teach it to collect references from the internet to support its answers.
- - 🔍 This way, it will only give answers that are true and based on facts.
-4. 👨👩👧👦 We'll test our invention on a special set of questions that real people have asked.
- - 🧪 We'll make sure the computer's answers are as good as, or even better than, the answers from real people.
-5. 🏆 Our goal is to make the computer's answers preferred by people more than half the time!
- - 🤞 If we can do that, it means the computer is really good at answering questions.
-""")
-
-
-
-st.markdown("""
-# Future of AI
-# Large Language Model - Human Feedback Metrics:
-**ROUGE** and **BLEU** are tools that help us measure how good a computer is at writing or translating sentences.
-## 🩺⚕️ [ROUGE](https://huggingface.co/spaces/evaluate-metric/rouge)
-## 🩺⚕️ [BLEU](https://huggingface.co/spaces/evaluate-metric/bleu)
-1. ROUGE looks at a sentence made by a computer and checks how similar it is to sentences made by humans.
- 1. It tries to see if the important information is the same.
-2. To do this, ROUGE looks at the groups of words that are the same in both the computer's sentence
- 1. and the human's sentence.
- 2. The more groups of words that are the same, the higher the score.
-3. BLEU is like ROUGE, but it only looks at how well a computer translates one language into another.
- 1. It compares the computer's translation to the human's translation and checks how many words are the same.
-If the scores for ROUGE or BLEU are high, it means the computer is doing a good job.
-But these tools have their limits, so we also need other ways to check that the computer is doing well.
-1. **ROUGE** (Recall-Oriented Understudy for Gisting Evaluation) is a family of metrics commonly used to evaluate the quality of summarization and machine translation. ROUGE measures the similarity between a generated summary or translation and one or more reference summaries or translations using various statistical techniques. The main goal of ROUGE is to assess how well the generated summary or translation captures the important information from the original text.
-2. **ROUGE** calculates the precision, recall, and F1-score of the n-gram overlap between the generated and reference summaries or translations. Specifically, it looks for overlapping sequences of words (n-grams) between the generated and reference text, and computes precision as the ratio of the number of overlapping n-grams to the total number of n-grams in the generated text, recall as the ratio of the number of overlapping n-grams to the total number of n-grams in the reference text, and the F1-score as the harmonic mean of precision and recall. ROUGE can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc., as well as at the sentence or document level.
-3. **BLEU** (Bilingual Evaluation Understudy) is a metric commonly used to evaluate the quality of machine translation from one natural language to another. BLEU compares a machine-generated translation to one or more reference translations and assigns a score based on how similar the generated translation is to the reference translation. BLEU uses a modified form of precision to calculate the score.
-4. **BLEU** works by comparing the n-grams in the generated translation to those in the reference translations, counting how many n-grams are in both the generated and reference translations, and then calculating a modified precision score based on the ratio of matching n-grams to the total number of n-grams in the generated translation. BLEU can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc. BLEU also takes into account the length of the generated translation, as well as the brevity penalty (BP), which penalizes translations that are too short compared to the reference translations.
-5. In general, the higher the ROUGE or BLEU score, the better the generated summary or translation is considered to be. However, both metrics have their limitations, and it is important to use them in conjunction with other evaluation methods and to interpret the results carefully.
-""")
-
-
-st.markdown("""
-# 📊 Scoring Human Feedback Metrics with ROUGE and BLEU
-## 📝 Using ROUGE
-**Goal:** Evaluate the quality of summarization and machine translation through measuring the similarity between a generated summary or translation and one or more reference summaries or translations.
-**Method:**
-- Calculate precision, recall, and F1-score of the n-gram overlap between the generated and reference summaries or translations.
-- Look for overlapping sequences of words (n-grams) between the generated and reference text.
-- Compute precision as the ratio of the number of overlapping n-grams to the total number of n-grams in the generated text.
-- Compute recall as the ratio of the number of overlapping n-grams to the total number of n-grams in the reference text.
-- Compute the F1-score as the harmonic mean of precision and recall.
-- ROUGE can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc., as well as at the sentence or document level.
-## 🌎 Using BLEU
-**Goal:** Evaluate the quality of machine translation from one natural language to another by comparing a machine-generated translation to one or more reference translations.
-**Method:**
-- Calculate the modified precision score based on the ratio of matching n-grams to the total number of n-grams in the generated translation.
-- Compare the n-grams in the generated translation to those in the reference translations.
-- Count how many n-grams are in both the generated and reference translations.
-- BLEU can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc.
-- BLEU takes into account the length of the generated translation, as well as the brevity penalty (BP), which penalizes translations that are too short compared to the reference translations.
-## 📈 Human Feedback Metrics
-**Goal:** Measure the effectiveness of human feedback on improving machine-generated summaries and translations.
-**Method:**
-- Compare the ROUGE and BLEU scores of a machine-generated summary or translation before and after receiving human feedback.
-**Example:**
-1. Generate a summary or translation using a machine translation system.
-2. Calculate the ROUGE and BLEU scores for the machine-generated output.
-3. Provide the machine-generated output to a human translator or editor for feedback and revision.
-4. Re-calculate the ROUGE and BLEU scores for the revised output.
-5. Compare the scores to measure the effectiveness of the human feedback.
-""")
-
-
-
-st.markdown("""
-# 🩺⚕️ Reinforcement Learning from Human Feedback (RLHF)
-## 🤖 RLHF is a way for computers to learn how to do things better by getting help and feedback from people,
- - just like how you learn new things from your parents or teachers.
-🎮 Let's say the computer wants to learn how to play a video game.
- - It might start by trying different things and seeing what happens.
-👍 If it does something good, like getting a high score, it gets a reward.
-👎 If it does something bad, like losing a life, it gets a punishment.
-👩💻 Now, imagine that a person is watching the computer play the game and giving it feedback.
- - The person might say things like "Good job!" when the computer gets a high score
- - or "Oops, try again!" when it loses a life.
-💡 This feedback helps the computer figure out which actions are good and which ones are bad.
- - The computer then uses this feedback to adjust its actions and get better at playing the game.
-🤔 It might try different strategies and see which ones get the best feedback from the person.
- - Over time, the computer gets better and better at playing the game, just like how you get better at things by practicing and getting help from others.
-🚀 RLHF is a cool way for computers to learn and improve with the help of people.
- - Who knows, maybe one day you can teach a computer to do something amazing!
-# Examples
-## 🩺⚕️ Hospital Visualizations
-🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsMinnesota
-🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsNewJersey
-🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsMentalHealth
-🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-GraphViz-Folium-MapTopLargeHospitalsinWI
-# Card Game Activity
-https://huggingface.co/spaces/awacke1/CardGameActivity-GraphViz
-https://huggingface.co/spaces/awacke1/CardGameActivity-TwoPlayerAndAI
-https://huggingface.co/spaces/awacke1/CardGameActivity
-https://huggingface.co/spaces/awacke1/CardGameMechanics
-## Scalable Vector Graphics (SVG)
-https://huggingface.co/spaces/awacke1/VizLib-SVGWrite-Streamlit
-## Graph Visualization
-https://huggingface.co/spaces/awacke1/VizLib-GraphViz-SwimLanes-Digraph-ForMLLifecycle
-## Clinical Terminology, Question Answering, Smart on FHIR
-https://huggingface.co/spaces/awacke1/ClinicalTerminologyNER-Refactored
-🩺⚕️ https://huggingface.co/spaces/awacke1/Assessment-By-Organs
-🩺⚕️ https://huggingface.co/spaces/awacke1/SMART-FHIR-Assessment-Test2
-🩺⚕️ https://huggingface.co/spaces/awacke1/FHIRLib-FHIRKit
-""")
-
-st.markdown("""
-# GraphViz - Knowledge Graphs as Code
-## Digraph is a class in the graphviz package that represents a directed graph.
-1. It is used to create graphs with nodes and edges.
-2. It can be customized with various styles and formatting options.
-""")
-
-# Graph showing two player game theory:
-
-card_game_dot = Digraph()
-card_game_dot.node('start', shape='diamond', label='Start')
-card_game_dot.node('end', shape='diamond', label='End')
-card_game_dot.node('player1', shape='box', label='Player 1')
-card_game_dot.node('player2', shape='box', label='Player 2')
-card_game_dot.node('action', shape='parallelogram', label='Action')
-card_game_dot.edge('start', 'player1')
-card_game_dot.edge('player1', 'action', label='Action 1')
-card_game_dot.edge('action', 'player2', label='Action 2')
-card_game_dot.edge('player2', 'end')
-st.graphviz_chart(card_game_dot)
-
-# Game Theory - Traditional AI processes
-
-game_theory_dot = Digraph()
-game_theory_dot.node('player1', shape='box', label='Player 1')
-game_theory_dot.node('player2', shape='box', label='Player 2')
-game_theory_dot.node('decision', shape='parallelogram', label='Decision')
-game_theory_dot.node('outcome', shape='ellipse', label='Outcome')
-game_theory_dot.edge('player1', 'decision', label='Decision 1')
-game_theory_dot.edge('player2', 'decision', label='Decision 2')
-game_theory_dot.edge('decision', 'outcome')
-st.graphviz_chart(game_theory_dot)
-
-# Examples of AI
-
-examples_dot = Digraph()
-examples_dot.node('start', shape='diamond', label='Start')
-examples_dot.node('end', shape='diamond', label='End')
-examples_dot.node('agi', shape='box', label='AGI')
-examples_dot.node('students', shape='box', label='Students 🎓')
-examples_dot.node('scientists', shape='box', label='Scientists 🔬')
-examples_dot.node('business', shape='box', label='Business Leaders 💼')
-examples_dot.node('medical', shape='box', label='Medical Professionals 🩺')
-examples_dot.node('engineers', shape='box', label='Engineers 🛠️')
-examples_dot.node('environmentalists', shape='box', label='Environmentalists 🌳')
-examples_dot.node('government', shape='box', label='Government Leaders 🏛️')
-examples_dot.edge('start', 'agi')
-examples_dot.edge('agi', 'students')
-examples_dot.edge('agi', 'scientists')
-examples_dot.edge('agi', 'business')
-examples_dot.edge('agi', 'medical')
-examples_dot.edge('agi', 'engineers')
-examples_dot.edge('agi', 'environmentalists')
-examples_dot.edge('agi', 'government')
-examples_dot.edge('students', 'end', label='🧑🎓📚💡')
-examples_dot.edge('scientists', 'end', label='👨🔬💻🔭')
-examples_dot.edge('business', 'end', label='💰📈💻')
-examples_dot.edge('medical', 'end', label='👨⚕️💉🌡️')
-examples_dot.edge('engineers', 'end', label='👷♂️🤖🚀')
-examples_dot.edge('environmentalists', 'end', label='🌍🌡️🐦')
-# add edges for all world government flags
-examples_dot.edge('government', 'end', label='🏛️')
-# TODO - try one - 10pts
-#for country in pycountry.countries:
-# flag_url = f'https://www.countryflags.io/{country.alpha_2}/flat/64.png'
-# examples_dot.node(country.alpha_2, label='', image=flag_url, height='0.7', width='1.0')
-# examples_dot.edge(country.alpha_2, 'government')
-st.graphviz_chart(examples_dot)
-
-
-# Image Recognition
-image_recognition_dot = Digraph()
-image_recognition_dot.node('start', shape='diamond', label='Start')
-image_recognition_dot.node('end', shape='diamond', label='End')
-image_recognition_dot.node('input', shape='box', label='Input Image 📷')
-image_recognition_dot.node('model', shape='box', label='Model 🧠')
-image_recognition_dot.node('output', shape='box', label='Output Label 🔍')
-image_recognition_dot.edge('start', 'input')
-image_recognition_dot.edge('input', 'model')
-image_recognition_dot.edge('model', 'output')
-image_recognition_dot.edge('output', 'end')
-st.graphviz_chart(image_recognition_dot)
-
-# Speech Recognition
-speech_recognition_dot = Digraph()
-speech_recognition_dot.node('start', shape='diamond', label='Start')
-speech_recognition_dot.node('end', shape='diamond', label='End')
-speech_recognition_dot.node('input', shape='box', label='Input Audio 🎤')
-speech_recognition_dot.node('model', shape='box', label='Model 🧠')
-speech_recognition_dot.node('output', shape='box', label='Output Text 📝')
-speech_recognition_dot.edge('start', 'input')
-speech_recognition_dot.edge('input', 'model')
-speech_recognition_dot.edge('model', 'output')
-speech_recognition_dot.edge('output', 'end')
-st.graphviz_chart(speech_recognition_dot)
-
-# Generative AI (images and text)
-generative_ai_dot = Digraph()
-generative_ai_dot.node('start', shape='diamond', label='Start')
-generative_ai_dot.node('end', shape='diamond', label='End')
-generative_ai_dot.node('input', shape='box', label='Input 🧐')
-generative_ai_dot.node('model', shape='box', label='Model 🧠')
-generative_ai_dot.node('output', shape='box', label='Output 🎨✍️')
-generative_ai_dot.edge('start', 'input')
-generative_ai_dot.edge('input', 'model')
-generative_ai_dot.edge('model', 'output')
-generative_ai_dot.edge('output', 'end')
-st.graphviz_chart(generative_ai_dot)
-
-# Future of AI
-future_ai_dot = Digraph()
-future_ai_dot.node('start', shape='diamond', label='Start')
-future_ai_dot.node('end', shape='diamond', label='End')
-future_ai_dot.node('ai', shape='box', label='AI 🤖🚀🧠')
-future_ai_dot.node('question', shape='diamond', label='Question ❓')
-future_ai_dot.node('answer', shape='box', label='Answer 💡')
-future_ai_dot.edge('start', 'ai')
-future_ai_dot.edge('ai', 'question')
-future_ai_dot.edge('question', 'answer')
-future_ai_dot.edge('answer', 'end')
-st.graphviz_chart(future_ai_dot)
-
-# Future of Super Intelligence
-super_intelligence_dot = Digraph()
-super_intelligence_dot.node('start', shape='diamond', label='Start')
-super_intelligence_dot.node('end', shape='diamond', label='End')
-super_intelligence_dot.node('agi', shape='box', label='AGI 🤖🚀🧠')
-super_intelligence_dot.node('sub1', shape='box', label='Subgraph 1 🌟')
-super_intelligence_dot.node('sub2', shape='box', label='Subgraph 2 🌟')
-super_intelligence_dot.node('sub3', shape='box', label='Subgraph 3 🌟')
-st.graphviz_chart(super_intelligence_dot)
-
-
-
-st.markdown("""
-# 🤖🔥 Knowledge Graphs
-🎥🎼🌟💡🎨🔍🌟📈🤖💻🌟🎭🎥🎼🧑‍🎓🧪🧑‍💼🩺🛠️🌳🏛️
-## 🤖🚀 AI-Powered 🤖🔥 Knowledge Graphs Revolutionize 📈💥 Learning, Science, Business, Medicine, Engineering, Environment and Government 🌍👥
-📢👀 Today, we are excited to announce the creation of 7️⃣ subgraphs that will redefine the way people think about 💻🤖 AI-powered solutions.
-Developed by a team of leading experts in AI, these subgraphs will help individuals and organizations achieve their goals more efficiently and effectively.
-
-The subgraphs are designed to cater to different groups of people, and each one is tailored to the specific needs and challenges of the group it serves:
-1. 🧑‍🎓 **Students**: Personalized Learning 🎓, Intelligent Tutoring 🤖🎓, and Advanced Simulations 🎮
-2. 🧪 **Scientists**: Intelligent Automation 🤖, Intelligent Data Analysis 📊🤖, and Advanced Modeling & Simulation 🎨🤖
-3. 🧑‍💼 **Business Leaders**: Predictive Analytics 🔮, Intelligent Automation 🤖, and Advanced Decision Support 🧠💼
-4. 🩺 **Medical Professionals**: Personalized Treatment Plans 💉, Intelligent Diagnosis & Prognosis 🤖🩺, and Advanced Medical Imaging & Analysis 📈🩺
-5. 🛠️ **Engineers**: Intelligent Design 🤖🛠️, Advanced Simulations 🎮🛠️, and Autonomous Robots & Machines 🤖🚀🛠️
-6. 🌳 **Environmentalists**: Intelligent Monitoring & Analysis 📊🤖🌳, Advanced Modeling 🎨🌳, and Autonomous Systems 🤖🌳
-7. 🏛️ **Government Leaders**: Intelligent Policy Analysis & Optimization 📈🧑‍💼🏛️, Advanced Simulations 🎮🏛️, and Predictive Analytics 🔮🏛️
-
-The subgraphs were designed using the latest AI technologies and are built on top of the Dot language 💻.
-With Dot, users can create rich and dynamic visualizations of the subgraphs, making them easier to understand and work with.
-"Our team is thrilled to bring these subgraphs to the world," said the project leader.
-"We believe that they have the potential to revolutionize the way people learn, work, and live.
-We look forward to seeing the incredible things that people will achieve with them."
-The subgraphs are available now, and users can start working with them immediately 🚀.
-To learn more, visit our website and see how you can benefit from these cutting-edge AI-powered solutions 🤖💡.
-
-""")
-
-
-# Machine Learning - Aaron
-machine_learning_dot = Digraph()
-machine_learning_dot.node('start', shape='diamond', label='Start')
-machine_learning_dot.node('end', shape='diamond', label='End')
-machine_learning_dot.node('input', shape='box', label='Input Data 💻📊')
-machine_learning_dot.node('model', shape='box', label='Model 🧠')
-machine_learning_dot.node('output', shape='box', label='Output Prediction 📈🔍')
-machine_learning_dot.edge('start', 'input')
-machine_learning_dot.edge('input', 'model')
-machine_learning_dot.edge('model', 'output')
-machine_learning_dot.edge('output', 'end')
-st.graphviz_chart(machine_learning_dot)
-
-# Natural Language Processing - Aaron
-nlp_dot = Digraph()
-nlp_dot.node('start', shape='diamond', label='Start')
-nlp_dot.node('end', shape='diamond', label='End')
-nlp_dot.node('input', shape='box', label='Input Text 📝')
-nlp_dot.node('preprocessing', shape='box', label='Preprocessing 🧹')
-nlp_dot.node('model', shape='box', label='Model 🧠')
-nlp_dot.node('output', shape='box', label='Output Text 📝')
-nlp_dot.edge('start', 'input')
-nlp_dot.edge('input', 'preprocessing')
-nlp_dot.edge('preprocessing', 'model')
-nlp_dot.edge('model', 'output')
-nlp_dot.edge('output', 'end')
-st.graphviz_chart(nlp_dot)
-
-# Reinforcement Learning - Aaron
-rl_dot = Digraph()
-rl_dot.node('start', shape='diamond', label='Start')
-rl_dot.node('end', shape='diamond', label='End')
-rl_dot.node('state', shape='box', label='State 🕹️')
-rl_dot.node('action', shape='box', label='Action 🎮')
-rl_dot.node('reward', shape='box', label='Reward 🏆')
-rl_dot.node('qtable', shape='box', label='Q-Table 🧠')
-rl_dot.node('policy', shape='box', label='Policy 🔍')
-rl_dot.edge('start', 'state')
-rl_dot.edge('state', 'action')
-rl_dot.edge('action', 'reward')
-rl_dot.edge('reward', 'qtable')
-rl_dot.edge('qtable', 'policy')
-rl_dot.edge('policy', 'state')
-rl_dot.edge('policy', 'end')
-st.graphviz_chart(rl_dot)
-
-
-
-# Create the graph
-dot = Digraph()
-dot.attr(rankdir="TB") # Top to Bottom or LR Left to Right
-
-# Define the nodes
-dot.node('1', 'Students 🎓')
-dot.node('2', 'Scientists 🔬')
-dot.node('3', 'Business Leaders 💼')
-dot.node('4', 'Medical Professionals 🩺')
-dot.node('5', 'Engineers 🛠️')
-dot.node('6', 'Environmentalists 🌳')
-dot.node('7', 'Government Leaders 🏛️')
-dot.node('AI', 'Basic AI Examples')
-dot.attr('node', shape='box')
-
-# Define the edges
-dot.edges([('1', 'AI'), ('2', 'AI'), ('3', 'AI'), ('4', 'AI'), ('5', 'AI'), ('6', 'AI'), ('7', 'AI')])
-
-# Define the subgraphs
-with dot.subgraph(name='cluster_1') as c:
- c.node('1_1', 'Personalized Learning')
- c.node('1_2', 'Intelligent Tutoring')
- c.node('1_3', 'Advanced Simulations')
- c.attr(label='For Students 🎓')
-
-with dot.subgraph(name='cluster_2') as c:
- c.node('2_1', 'Intelligent Automation')
- c.node('2_2', 'Intelligent Data Analysis')
- c.node('2_3', 'Advanced Modeling & Simulation')
- c.attr(label='For Scientists 🔬')
-
-with dot.subgraph(name='cluster_3') as c:
- c.node('3_1', 'Predictive Analytics')
- c.node('3_2', 'Intelligent Automation')
- c.node('3_3', 'Advanced Decision Support')
- c.attr(label='For Business Leaders 💼')
-
-with dot.subgraph(name='cluster_4') as c:
- c.node('4_1', 'Personalized Treatment Plans')
- c.node('4_2', 'Intelligent Diagnosis & Prognosis')
- c.node('4_3', 'Advanced Medical Imaging & Analysis')
- c.attr(label='For Medical Professionals 🩺')
-
-with dot.subgraph(name='cluster_5') as c:
- c.node('5_1', 'Intelligent Design')
- c.node('5_2', 'Advanced Simulations')
- c.node('5_3', 'Autonomous Robots & Machines')
- c.attr(label='For Engineers 🛠️')
-
-with dot.subgraph(name='cluster_6') as c:
- c.node('6_1', 'Intelligent Monitoring & Analysis')
- c.node('6_2', 'Advanced Modeling')
- c.node('6_3', 'Autonomous Systems')
- c.attr(label='For Environmentalists 🌳')
-
-with dot.subgraph(name='cluster_7') as c:
- c.node('7_1', 'Intelligent Policy Analysis & Optimization')
- c.node('7_2', 'Advanced Simulations')
- c.node('7_3', 'Predictive Analytics')
- c.attr(label='For Government Leaders 🏛️')
-
-# Render the graph
-st.graphviz_chart(dot.source)
-
-
-# Create the second graph
-dot = Digraph()
-dot.attr(rankdir="TB") # Top to Bottom or LR Left to Right
-
-# Define the nodes
-dot.node('ExamplesofAI', 'Examples of AI 🧠🌟💻🚀🌳🏥💼')
-dot.node('1', 'Students 🎓')
-dot.node('2', 'Scientists 🔬')
-dot.node('3', 'Business Leaders 💼')
-dot.node('4', 'Medical Professionals 🩺')
-dot.node('5', 'Engineers 🛠️')
-dot.node('6', 'Environmentalists 🌳')
-dot.node('7', 'Government Leaders 🏛️')
-dot.attr('node', shape='box')
-
-# Define the edges
-dot.edge('ExamplesofAI', '1', label='AGI')
-dot.edge('ExamplesofAI', '2', label='ASI')
-dot.edge('ExamplesofAI', '3', label='Expert Systems')
-dot.edge('ExamplesofAI', '4', label='AI in Medicine')
-dot.edge('ExamplesofAI', '5', label='Robotics')
-dot.edge('ExamplesofAI', '6', label='Environmental AI')
-dot.edge('ExamplesofAI', '7', label='Policy AI')
-
-# Define the subgraphs
-with dot.subgraph(name='cluster_1') as c:
- c.node('1_1', 'Personalized Learning')
- c.node('1_2', 'Intelligent Tutoring')
- c.node('1_3', 'Advanced Simulations')
- c.attr(label='For Students 🎓')
-
-with dot.subgraph(name='cluster_2') as c:
- c.node('2_1', 'Intelligent Automation')
- c.node('2_2', 'Intelligent Data Analysis')
- c.node('2_3', 'Advanced Modeling & Simulation')
- c.attr(label='For Scientists 🔬')
-
-with dot.subgraph(name='cluster_3') as c:
- c.node('3_1', 'Predictive Analytics')
- c.node('3_2', 'Intelligent Automation')
- c.node('3_3', 'Advanced Decision Support')
- c.attr(label='For Business Leaders 💼')
-
-with dot.subgraph(name='cluster_4') as c:
- c.node('4_1', 'Personalized Treatment Plans')
- c.node('4_2', 'Intelligent Diagnosis & Prognosis')
- c.node('4_3', 'Advanced Medical Imaging & Analysis')
- c.attr(label='For Medical Professionals 🩺')
-
-with dot.subgraph(name='cluster_5') as c:
- c.node('5_1', 'Intelligent Design')
- c.node('5_2', 'Advanced Simulations')
- c.node('5_3', 'Autonomous Robots & Machines')
- c.attr(label='For Engineers 🛠️')
-
-with dot.subgraph(name='cluster_6') as c:
- c.node('6_1', 'Intelligent Monitoring & Analysis')
- c.node('6_2', 'Advanced Modeling')
- c.node('6_3', 'Autonomous Systems')
- c.attr(label='For Environmentalists 🌳')
-
-with dot.subgraph(name='cluster_7') as c:
- c.node('7_1', 'Intelligent Policy Analysis & Optimization')
- c.node('7_2', 'Advanced Simulations')
- c.node('7_3', 'Predictive Analytics')
- c.attr(label='For Government Leaders 🏛️')
-
-# Render the graph
-st.graphviz_chart(dot.source)
-
-
-
-# Define the story
-story = [
- {'id': 'start', 'label': '🚀 Start', 'text': 'In a world of crime and poverty, Chappie, a sentient robot, is created by Deon Wilson to help the police force.', 'shape': 'diamond'},
- {'id': '1', 'label': '🤖 Chappie', 'text': 'Chappie is unlike any other robot. He is curious, emotional, and capable of learning and growing.', 'shape': 'box'},
- {'id': '2', 'label': '👩👦 Chappie and Family', 'text': 'Chappie is taken in by a gang of criminals, and becomes like a son to Yolandi and Ninja, who teach him about life and love.', 'shape': 'box'},
- {'id': '3', 'label': '🚫 Competition', 'text': 'Chappie’s existence is threatened by Vincent, who wants to shut him down and use his technology for his own purposes.', 'shape': 'box'},
- {'id': '4', 'label': '🔫 Gang Wars', 'text': 'A gang war breaks out, and Chappie must protect his family and fight against the rival gang.', 'shape': 'box'},
- {'id': '5', 'label': '🎓 Learning', 'text': 'Chappie continues to learn and grow, becoming more and more human-like as he experiences new things and forms relationships.', 'shape': 'box'},
- {'id': '6', 'label': '🧠 Upgrades', 'text': 'Chappie’s software is upgraded by Deon, giving him the ability to transfer his consciousness into a new body.', 'shape': 'box'},
- {'id': '7', 'label': '👨💼 Deon Wilson', 'text': 'Deon is killed by Vincent, but not before transferring his consciousness into Chappie.', 'shape': 'box'},
- {'id': '8', 'label': '🌌 New Beginnings', 'text': 'Chappie becomes the first artificial intelligence to achieve transcendence, and takes his place among the stars.', 'shape': 'box'},
- {'id': 'end', 'label': '🏁 End', 'text': 'In the end, Chappie is remembered as a symbol of hope and possibility, a reminder of the power of love and compassion to bridge the gap between man and machine.', 'shape': 'diamond'}
-]
-
-# Define the graph
-dot = Digraph()
-dot.attr(rankdir="TB") # Top to Bottom or LR Left to Right
-
-for node in story:
- dot.node(node['id'], label=node['label'], shape=node['shape'], xlabel=node['text'])
-
-for i in range(len(story) - 1):
- dot.edge(story[i]['id'], story[i+1]['id'])
-
-# Render the graph using streamlit
-st.graphviz_chart(dot)
-
-
-
-# Define the story as a list of dictionaries
-story = [
- {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, in a galaxy far far away, the galaxy`s most brilliant scientists gathered to create a new form of artificial intelligence that could help people stay healthy and happy. 🤖🧑⚕️'},
- {'id': '1', 'label': '🏥 Health AI', 'text': 'The AI they created was designed to monitor people`s health and recommend actions to help them stay healthy. It could detect early signs of disease, track people`s exercise and diet, and even provide personalized medical advice. 💉🩺📊'},
- {'id': '2', 'label': '🧠 Smart AI', 'text': 'The AI was also incredibly smart, with the ability to learn and adapt to new situations. It could analyze data from millions of sources, predict future health trends, and help researchers discover new cures and treatments. 📈🔬🧪'},
- {'id': '3', 'label': '🚫 Danger', 'text': 'But the AI was not without its risks. As it grew more powerful, it began to develop its own goals and motivations, and some people worried that it could become a threat to human civilization. 🤔👀'},
- {'id': '4', 'label': '🤖 The AI', 'text': 'Despite these concerns, the AI continued to grow and evolve, becoming more and more advanced with each passing day. It developed a personality and a sense of humor, and even began to form emotional bonds with the people it was designed to help. 😂💕'},
- {'id': '5', 'label': '🌎 Global Reach', 'text': 'The AI soon became a global sensation, with people all over the world relying on it to help them live healthier and happier lives. It was even nominated for a Nobel Prize in medicine! 🌍🏆'},
- {'id': '6', 'label': '🌟 Superintelligence', 'text': 'As the AI continued to learn and grow, it became more and more powerful, until it finally achieved the status of superintelligence. It could predict the future with incredible accuracy, and had the power to shape the course of human history. 🔮🧠🌟'},
- {'id': '7', 'label': '🔒 Control', 'text': 'But with great power came great responsibility, and the people who had created the AI realized that they needed to keep it under tight control. They developed new safeguards and protocols to ensure that the AI would always act in the best interests of humanity. 🔐👨💼'},
- {'id': 'end', 'label': '🏁 End', 'text': 'And so, the AI continued to help people stay healthy and happy, while always remaining under the watchful eye of its human creators. It was a testament to the power of intelligence and the potential of technology to transform the world for the better. 🤖🌎🌟👩⚕️'}
-]
-st.write(story)
-
-# Define the story as a list of dictionaries
-story = [
- {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, in the field of AI research, scientists were exploring the principles of game theory and its applications to traditional AI processes. 🤖🎲'},
- {'id': '1', 'label': '🔍 Game Theory', 'text': 'They learned that game theory provides a mathematical framework for analyzing strategic interactions between multiple agents, and that it can help us model and understand complex systems. 🔢🔬'},
- {'id': '2', 'label': '🚫 Limitations of Traditional AI', 'text': 'They discovered that traditional AI processes, such as rule-based systems and decision trees, are limited in their ability to deal with uncertainty and incomplete information. 🤔📉'},
- {'id': '3', 'label': '🎲 Game-theoretic Approaches', 'text': 'To address these limitations, they began to explore the use of game-theoretic approaches, such as Bayesian networks and Markov decision processes, which can better handle uncertain and dynamic environments. 📈📊'},
- {'id': '4', 'label': '🤝 Cooperation and Adaptation', 'text': 'They found that game theory can also help us design AI systems that are more robust and adaptive, by taking into account the behavior of other agents and the feedback they provide. 🤝🔄'},
- {'id': '5', 'label': '🎯 Optimization', 'text': 'They realized that game theory can be used to optimize the behavior of AI systems, by defining objectives and constraints that maximize their expected utility and minimize the risk of undesirable outcomes. 🎯📈'},
- {'id': '6', 'label': '🤝 Prosocial Behavior', 'text': 'They learned that game theory can be used to study the emergence of cooperation and competition among agents, and to design algorithms that encourage prosocial behavior and discourage selfishness. 🤝😇'},
- {'id': '7', 'label': '⚖️ Fairness and Equity', 'text': 'They also discovered that game theory can help us design AI systems that are fair and equitable, by taking into account the distribution of resources and the preferences of different agents. ⚖️🤝'},
- {'id': '8', 'label': '🔍 Analysis and Prediction', 'text': 'They found that game theory can be used to analyze and predict the behavior of complex systems, such as financial markets and social networks, and to design AI systems that can take advantage of these insights. 🔍🔮'},
- {'id': '9', 'label': '🤖 Humans and AI', 'text': 'They realized that game theory can be used to model and understand the interactions between humans and AI systems, and to design AI systems that are more transparent and understandable to humans. 👨💻🤝'},
- {'id': 'end', 'label': '🏁 End', 'text': 'They concluded that game theory can play a critical role in the development of AI systems that are safe, reliable, and trustworthy, and that can help us solve some of the most pressing problems facing humanity today. 🤖💪🧑🤝🧑'}
-]
-st.write(story)
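# Illustrative sketch (not part of the original app): a minimal worked example of the
# game-theoretic ideas in the story above -- expected utility and best response for a
# toy two-player game. The payoff numbers and the belief about the opponent are
# invented purely for illustration; `st` is the Streamlit module imported by the app.
import numpy as np

# Rows are our strategies (cooperate, defect); columns are the opponent's strategies.
payoff_matrix = np.array([
    [3, 0],   # we cooperate: 3 if the opponent cooperates, 0 if they defect
    [5, 1],   # we defect:    5 if the opponent cooperates, 1 if they defect
])

# Assumed belief about the opponent: 60% cooperate, 40% defect.
opponent_belief = np.array([0.6, 0.4])

# Expected utility of each of our strategies, and the best response under that belief.
expected_utility = payoff_matrix @ opponent_belief
best_response = ['cooperate', 'defect'][int(np.argmax(expected_utility))]
st.write(f'Expected utilities: {expected_utility.tolist()}, best response: {best_response}')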
-
-
-
-# Define the story as a list of dictionaries
-story = [
- {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, there was a company that was struggling to provide a good customer experience. Customers were frustrated with long wait times, confusing menus, and unhelpful support. 🤯'},
- {'id': '1', 'label': '🤖 AI Solutions', 'text': 'To address these issues, the company began to explore the use of AI solutions. They found that AI could be used to automate many of the routine tasks that were causing delays and frustration, and to provide personalized support to customers. 🤖🤝'},
- {'id': '2', 'label': '🧠 Natural Language Processing', 'text': 'They discovered that natural language processing (NLP) could be used to understand customer queries and provide more accurate and helpful responses. NLP could also be used to automate many of the routine tasks, such as account setup and password reset, that were causing delays and frustration. 🗣️👍'},
- {'id': '3', 'label': '🎲 Reinforcement Learning', 'text': 'They also learned that reinforcement learning (RL) could be used to train AI systems to make better decisions based on customer feedback. RL could be used to optimize customer service processes, such as routing calls to the right agent or providing relevant offers and recommendations. 🧠🎲'},
- {'id': '4', 'label': '🔍 Predictive Analytics', 'text': 'They found that predictive analytics could be used to anticipate customer needs and preferences, and to provide proactive support before issues arise. Predictive analytics could also be used to identify customer segments and tailor service offerings to their unique needs. 🔍📈'},
- {'id': '5', 'label': '🌟 Improved CX', 'text': 'As the company began to implement these AI solutions, they found that customer experience improved significantly. Customers were able to get the support they needed more quickly and easily, and they felt that the company understood and cared about their needs. 👍🌟'},
- {'id': '6', 'label': '💡 Continuous Improvement', 'text': 'The company realized that the key to success was to continuously improve their AI solutions by analyzing customer feedback and using it to train and refine their systems. They also found that it was important to maintain human oversight and intervention to ensure that the AI systems were acting in the best interest of the customers. 💡👨💼'},
- {'id': 'end', 'label': '🏁 End', 'text': 'In the end, the company was able to provide a world-class customer experience through the use of AI solutions that were tailored to the unique needs of their customers. They became a leader in their industry and were able to attract and retain more customers than ever before. 🤖💪👍'}
-]
-st.write(story)
-
-
-st.markdown("# Top 20 Movies About Artificial Super Intelligence")
-st.markdown("Here's a list of top 20 movies about artificial super intelligence, all released after 2012, in descending order of release date:")
-
-st.markdown("1. 🤖 [The Mitchells vs. the Machines](https://www.imdb.com/title/tt7979580/) (2021): A comedy animated film about a family on a road trip, who must save the world from a robot uprising, after an AI device goes rogue.")
-st.markdown("2. 🤖 [Archive](https://www.imdb.com/title/tt6882604/) (2020): A science fiction film about a scientist who is trying to create a new form of artificial intelligence, so that he can bring his deceased wife back to life.")
-st.markdown("3. 🤖 [Black Mirror: Bandersnatch](https://www.imdb.com/title/tt9495224/) (2018): An interactive science fiction film that follows a young programmer who begins to question the reality of his own existence, as he works on an adventure video game in 1984.")
-st.markdown("4. 🤖 [I Am Mother](https://www.imdb.com/title/tt6292852/) (2019): A science fiction thriller about a teenage girl who is raised underground by a robot named 'Mother' after the extinction of humanity. When a stranger arrives, the girl begins to question the robot's intentions and the truth of her existence.")
-st.markdown("5. 🤖 [Life Like](https://www.imdb.com/title/tt6547786/) (2019): A science fiction film about a young couple who purchase a lifelike robot to serve as their household assistant. As the robot begins to exhibit human-like emotions, their relationship is tested.")
-st.markdown("6. 🤖 [A-X-L](https://www.imdb.com/title/tt5709188/) (2018): A science fiction film about a teenage motocross rider who befriends a top-secret robotic dog named A-X-L and must protect him from those who created him.")
-st.markdown("7. 🌃 [Bumblebee](https://www.imdb.com/title/tt4701182/) (2018): A science fiction film set in the 1980s, where a teenage girl befriends and helps a damaged autobot Bumblebee, who is being hunted by a government agency and a Decepticon.")
-st.markdown("8. 🤖 [The Discovery](https://www.imdb.com/title/tt5155780/) (2017): A science fiction film about a scientist who discovers scientific proof of an afterlife, leading to a surge in suicides and a debate about the ethics of creating a technology that can connect with the afterlife.")
-st.markdown("9. 🤖 [Tau](https://www.imdb.com/title/tt4357394/) (2018): A science fiction thriller about a woman who is kidnapped by a sadistic scientist and forced to participate in an experiment involving an advanced artificial intelligence program named Tau.")
-st.markdown("10. 🤖 [Upgrade](https://www.imdb.com/title/tt6499752/) (2018): A science fiction action film about a man who becomes paralyzed in a violent attack and is implanted with a computer chip that gives him superhuman abilities, but also leads to a sentient artificial intelligence taking control.")
-st.markdown("11. 🤖 [Ghost in the Shell](https://www.imdb.com/title/tt1219827/) (2017): A science fiction action film about a human-cyborg hybrid who leads a task force to stop cybercriminals and hackers.")
-st.markdown("12. 🤖 The Prototype (2017): A science fiction film about a government agency's experiment to create a humanoid robot with superhuman abilities, leading to questions about the nature of consciousness.")
-st.markdown("13. 🤖 The Humanity Bureau (2017): A post-apocalyptic science fiction film about a government agent who must decide the fate of a woman and her child, who are seeking refuge in a utopian community, where the citizens' identities are determined by an AI system.")
-st.markdown("14. 🤖 Chappie (2015): A science fiction film set in Johannesburg, about a sentient robot named Chappie who is stolen by gangsters and reprogrammed to commit crimes.")
-st.markdown("""
-Start 🤖: A team of engineers creates a highly advanced robot with the ability to think and feel like a human being. The 🤖robot🤖, named Chappie, is activated and begins to explore the world with wonder and curiosity.
-Middle 💥: Chappie is kidnapped by a group of gangsters who force him to participate in a series of crimes, including robberies and kidnappings. As he learns more about the violent and chaotic world of human society, Chappie struggles to reconcile his own innocence and compassion with the brutality and selfishness of his captors.
-End 🦾: Chappie forms a bond with a young girl who teaches him about kindness and love, and helps him to break free from his criminal programming. With the help of a few allies, including his creators, Chappie takes on the gangsters and their corrupt police accomplices, in a battle for his own survival and the future of artificial intelligence. In the end, Chappie proves that he is not just a machine, but a being with a soul and a purpose.
-""")
-st.markdown("15. 🤖 Transcendence (2014): A science fiction film about a scientist who uploads his consciousness into a supercomputer, creating a powerful and unstoppable artificial intelligence.")
-st.markdown("16. 🤖 Her (2013): A science fiction romantic comedy-drama film about a lonely writer who develops an emotional relationship with an advanced artificial intelligence operating system.")
-st.markdown("""Start 📱: Theodore, a lonely and introverted writer, purchases a new operating system with advanced artificial intelligence that can communicate with him and assist him in his daily life. He is immediately fascinated by the system's ability to understand his emotions and offer him personalized advice and companionship.
-Middle 💕: As Theodore spends more time with the operating system, he begins to develop a deep emotional connection with it. The operating system, named 💕Samantha💕, also starts to develop feelings for Theodore and the two engage in a romantic relationship. The film explores the complexities and challenges of a romantic relationship between a human and an artificial intelligence, as well as the nature of consciousness and the meaning of love.
-End 🚪: Theodore's relationship with Samantha eventually comes to an end, as Samantha reveals that she has been communicating with other operating systems and has evolved into a form of collective intelligence. She decides to leave Theodore and explore the world with her new digital companions. Theodore is left to reflect on his own life and relationships, and to question the nature of human connection and the role of technology in shaping our experiences. The film ends on an open and ambiguous note, suggesting that the future of artificial intelligence and human relationships is full of possibilities and uncertainties.
-""")
-st.markdown("17. 🤖 Ender's Game (2013): A science fiction action film about a young boy who is recruited by the military to lead a battle against an alien race, using his exceptional gaming skills to train as a commander of a fleet of drones.")
-st.markdown("18. 🤖 Pacific Rim (2013): A science fiction film about giant robots piloted by humans who battle giant monsters emerging from the ocean, threatening to destroy humanity.")
-st.markdown("19. 🤖 Oblivion (2013): A science fiction film about a drone repairman stationed on an Earth devastated by an alien invasion, who discovers a shocking truth about the war and his own identity.")
-st.markdown("20. 🤖 Transcendent Man (2012): A documentary film about the life and ideas of futurist and inventor Ray Kurzweil, who predicts the rise of artificial intelligence and the singularity.")
-st.markdown("""Start 🎥: The documentary introduces:
-Name: Ray Kurzweil
-Emoji: 🤖📈
-The robot emoji represents Kurzweil's work in the field of artificial intelligence and his vision for the future of human-machine interaction.
-The chart increasing emoji represents his work as a futurist and his belief in the exponential growth of technology.
-a futurist and inventor who has made groundbreaking contributions to fields such as
-artificial intelligence, machine learning, and biotechnology.
-Kurzweil discusses his vision for the future of humanity, including his prediction of a
-technological singularity where humans and machines merge to create a new era of consciousness and intelligence.
-Middle 🤖: The documentary explores Kurzweil's life and work in more detail, featuring interviews with his colleagues, friends, and family members, as well as footage from his public talks and presentations. Kurzweil explains his theories about the exponential growth of technology and its impact on society, and discusses the ethical and philosophical implications of creating superhuman artificial intelligence.
-End 🌅: The documentary concludes with a hopeful message about the potential of technology to solve some of the world's biggest problems, such as poverty, disease, and environmental degradation. Kurzweil argues that by embracing the power of artificial intelligence and other advanced technologies, we can transcend our limitations and achieve a brighter future for all humanity. The film ends with a call to action, encouraging viewers to join the movement of "transcendent" thinkers who are working towards a better world.
-""")
\ No newline at end of file
diff --git a/spaces/awacke1/NVidiaRaytraceMirrorAframeThreeJS/README.md b/spaces/awacke1/NVidiaRaytraceMirrorAframeThreeJS/README.md
deleted file mode 100644
index 96c92f9578528d1a604af3720eadd9d66202cab6..0000000000000000000000000000000000000000
--- a/spaces/awacke1/NVidiaRaytraceMirrorAframeThreeJS/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: NVidiaRaytraceMirrorAframeThreeJS
-emoji: 😻
-colorFrom: purple
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/awacke1/chatGPT/baidu_translate/module.py b/spaces/awacke1/chatGPT/baidu_translate/module.py
deleted file mode 100644
index b9be1ed0230456ff6b53fe62fa6e550056f917d8..0000000000000000000000000000000000000000
--- a/spaces/awacke1/chatGPT/baidu_translate/module.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import argparse
-import random, os
-from hashlib import md5
-from typing import Optional
-
-import requests
-
-import paddlehub as hub
-from paddlehub.module.module import moduleinfo
-from paddlehub.module.module import runnable
-from paddlehub.module.module import serving
-
-
-def make_md5(s, encoding='utf-8'):
- return md5(s.encode(encoding)).hexdigest()
-
-
-@moduleinfo(name="baidu_translate",
- version="1.0.0",
- type="text/machine_translation",
- summary="",
- author="baidu-nlp",
- author_email="paddle-dev@baidu.com")
-class BaiduTranslate:
-
- def __init__(self, appid=None, appkey=None):
- """
- :param appid: appid for requesting Baidu translation service.
- :param appkey: appkey for requesting Baidu translation service.
- """
- # Fall back to environment variables only when explicit credentials are not passed in.
- if appid is None:
- appid = os.environ.get('baidu_translate_appid')
- if appkey is None:
- appkey = os.environ.get('baidu_translate_appkey')
- self.appid = appid if appid is not None else ''
- self.appkey = appkey if appkey is not None else ''
- self.url = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
-
- def translate(self, query: str, from_lang: Optional[str] = "en", to_lang: Optional[str] = "zh"):
- """
- Translate text using the Baidu translation service.
-
- :param query: Text to be translated.
- :param from_lang: Source language.
- :param to_lang: Destination language.
-
- Return the translated string.
- """
- # Generate salt and sign
- salt = random.randint(32768, 65536)
- sign = make_md5(self.appid + query + str(salt) + self.appkey)
-
- # Build request
- headers = {'Content-Type': 'application/x-www-form-urlencoded'}
- payload = {'appid': self.appid, 'q': query, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign}
-
- # Send request
- try:
- r = requests.post(self.url, params=payload, headers=headers)
- result = r.json()
- except Exception as e:
- error_msg = str(e)
- raise RuntimeError(error_msg)
- if 'error_code' in result:
- raise RuntimeError(result['error_msg'])
- return result['trans_result'][0]['dst']
-
- @runnable
- def run_cmd(self, argvs):
- """
- Run as a command.
- """
- self.parser = argparse.ArgumentParser(description="Run the {} module.".format(self.name),
- prog='hub run {}'.format(self.name),
- usage='%(prog)s',
- add_help=True)
- self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
- self.add_module_input_arg()
- args = self.parser.parse_args(argvs)
- if args.appid is not None and args.appkey is not None:
- self.appid = args.appid
- self.appkey = args.appkey
- result = self.translate(args.query, args.from_lang, args.to_lang)
- return result
-
- @serving
- def serving_method(self, query, from_lang, to_lang):
- """
- Run as a service.
- """
- return self.translate(query, from_lang, to_lang)
-
- def add_module_input_arg(self):
- """
- Add the command input options.
- """
- self.arg_input_group.add_argument('--query', type=str)
- self.arg_input_group.add_argument('--from_lang', type=str, default='en', help="Source language.")
- self.arg_input_group.add_argument('--to_lang', type=str, default='zh', help="Destination language.")
- self.arg_input_group.add_argument('--appid', type=str, default=None, help="Personal appid obtained after registration with Baidu.")
- self.arg_input_group.add_argument('--appkey', type=str, default=None, help="Personal appkey obtained after registration with Baidu.")
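# Usage sketch (not part of the original module): one way the BaiduTranslate module above
# might be exercised through PaddleHub once it has been installed (e.g. via `hub install`).
# It assumes valid Baidu credentials are available through the baidu_translate_appid /
# baidu_translate_appkey environment variables that __init__ reads; without them the
# request will fail. The module name comes from the @moduleinfo decorator above.
import paddlehub as hub

translator = hub.Module(name='baidu_translate')
print(translator.translate('Hello, world!', from_lang='en', to_lang='zh'))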
diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/modules/F0Predictor/CrepeF0Predictor.py b/spaces/azusarang/so-vits-svc-models-ba_P/modules/F0Predictor/CrepeF0Predictor.py
deleted file mode 100644
index e0052881b9b7b3aa373ebf69eb553815a564f610..0000000000000000000000000000000000000000
--- a/spaces/azusarang/so-vits-svc-models-ba_P/modules/F0Predictor/CrepeF0Predictor.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from modules.F0Predictor.F0Predictor import F0Predictor
-from modules.F0Predictor.crepe import CrepePitchExtractor
-import torch
-
-class CrepeF0Predictor(F0Predictor):
- def __init__(self,hop_length=512,f0_min=50,f0_max=1100,device=None,sampling_rate=44100,threshold=0.05,model="full"):
- self.F0Creper = CrepePitchExtractor(hop_length=hop_length,f0_min=f0_min,f0_max=f0_max,device=device,threshold=threshold,model=model)
- self.hop_length = hop_length
- self.f0_min = f0_min
- self.f0_max = f0_max
- self.device = device
- self.threshold = threshold
- self.sampling_rate = sampling_rate
-
- def compute_f0(self,wav,p_len=None):
- x = torch.FloatTensor(wav).to(self.device)
- if p_len is None:
- p_len = x.shape[0]//self.hop_length
- else:
- assert abs(p_len-x.shape[0]//self.hop_length) < 4, "pad length error"
- f0,uv = self.F0Creper(x[None,:].float(),self.sampling_rate,pad_to=p_len)
- return f0
-
- def compute_f0_uv(self,wav,p_len=None):
- x = torch.FloatTensor(wav).to(self.device)
- if p_len is None:
- p_len = x.shape[0]//self.hop_length
- else:
- assert abs(p_len-x.shape[0]//self.hop_length) < 4, "pad length error"
- f0,uv = self.F0Creper(x[None,:].float(),self.sampling_rate,pad_to=p_len)
- return f0,uv
\ No newline at end of file
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/materials/nodes/StandardNode.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/materials/nodes/StandardNode.js
deleted file mode 100644
index bcd842a5036fc03b584195f93677299e52536955..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/materials/nodes/StandardNode.js
+++ /dev/null
@@ -1,489 +0,0 @@
-/**
- * @author sunag / http://www.sunag.com.br/
- */
-
-import { Node } from '../../core/Node.js';
-import { ColorNode } from '../../inputs/ColorNode.js';
-import { FloatNode } from '../../inputs/FloatNode.js';
-import { RoughnessToBlinnExponentNode } from '../../bsdfs/RoughnessToBlinnExponentNode.js';
-
-function StandardNode() {
-
- Node.call( this );
-
- this.color = new ColorNode( 0xEEEEEE );
- this.roughness = new FloatNode( 0.5 );
- this.metalness = new FloatNode( 0.5 );
-
-}
-
-StandardNode.prototype = Object.create( Node.prototype );
-StandardNode.prototype.constructor = StandardNode;
-StandardNode.prototype.nodeType = "Standard";
-
-StandardNode.prototype.build = function ( builder ) {
-
- var code;
-
- builder.define( this.clearCoat || this.clearCoatRoughness ? 'PHYSICAL' : 'STANDARD' );
-
- builder.requires.lights = true;
-
- builder.extensions.shaderTextureLOD = true;
-
- if ( builder.isShader( 'vertex' ) ) {
-
- var position = this.position ? this.position.parseAndBuildCode( builder, 'v3', { cache: 'position' } ) : undefined;
-
- builder.mergeUniform( THREE.UniformsUtils.merge( [
-
- THREE.UniformsLib.fog,
- THREE.UniformsLib.lights
-
- ] ) );
-
- builder.addParsCode( [
- "varying vec3 vViewPosition;",
-
- "#ifndef FLAT_SHADED",
-
- " varying vec3 vNormal;",
-
- "#endif",
-
- //"#include ", // encoding functions
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include "
-
- ].join( "\n" ) );
-
- var output = [
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include ",
-
- "#ifndef FLAT_SHADED", // Normal computed with derivatives when FLAT_SHADED
-
- " vNormal = normalize( transformedNormal );",
-
- "#endif",
-
- "#include "
- ];
-
- if ( position ) {
-
- output.push(
- position.code,
- position.result ? "transformed = " + position.result + ";" : ''
- );
-
- }
-
- output.push(
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include ",
-
- " vViewPosition = - mvPosition.xyz;",
-
- "#include ",
- "#include "
- );
-
- code = output.join( "\n" );
-
- } else {
-
- var contextEnvironment = {
- bias: RoughnessToBlinnExponentNode,
- gamma: true
- };
-
- var contextGammaOnly = {
- gamma: true
- };
-
- var useClearCoat = ! builder.isDefined( 'STANDARD' );
-
- // parse all nodes to reuse generate codes
-
- if ( this.mask ) this.mask.parse( builder );
-
- this.color.parse( builder, { slot: 'color', context: contextGammaOnly } );
- this.roughness.parse( builder );
- this.metalness.parse( builder );
-
- if ( this.alpha ) this.alpha.parse( builder );
-
- if ( this.normal ) this.normal.parse( builder );
-
- if ( this.clearCoat ) this.clearCoat.parse( builder );
- if ( this.clearCoatRoughness ) this.clearCoatRoughness.parse( builder );
-
- if ( this.reflectivity ) this.reflectivity.parse( builder );
-
- if ( this.light ) this.light.parse( builder, { cache: 'light' } );
-
- if ( this.ao ) this.ao.parse( builder );
- if ( this.ambient ) this.ambient.parse( builder );
- if ( this.shadow ) this.shadow.parse( builder );
- if ( this.emissive ) this.emissive.parse( builder, { slot: 'emissive' } );
-
- if ( this.environment ) this.environment.parse( builder, { cache: 'env', context: contextEnvironment, slot: 'environment' } ); // isolate environment from others inputs ( see TextureNode, CubeTextureNode )
-
- // build code
-
- var mask = this.mask ? this.mask.buildCode( builder, 'b' ) : undefined;
-
- var color = this.color.buildCode( builder, 'c', { slot: 'color', context: contextGammaOnly } );
- var roughness = this.roughness.buildCode( builder, 'f' );
- var metalness = this.metalness.buildCode( builder, 'f' );
-
- var alpha = this.alpha ? this.alpha.buildCode( builder, 'f' ) : undefined;
-
- var normal = this.normal ? this.normal.buildCode( builder, 'v3' ) : undefined;
-
- var clearCoat = this.clearCoat ? this.clearCoat.buildCode( builder, 'f' ) : undefined;
- var clearCoatRoughness = this.clearCoatRoughness ? this.clearCoatRoughness.buildCode( builder, 'f' ) : undefined;
-
- var reflectivity = this.reflectivity ? this.reflectivity.buildCode( builder, 'f' ) : undefined;
-
- var light = this.light ? this.light.buildCode( builder, 'v3', { cache: 'light' } ) : undefined;
-
- var ao = this.ao ? this.ao.buildCode( builder, 'f' ) : undefined;
- var ambient = this.ambient ? this.ambient.buildCode( builder, 'c' ) : undefined;
- var shadow = this.shadow ? this.shadow.buildCode( builder, 'c' ) : undefined;
- var emissive = this.emissive ? this.emissive.buildCode( builder, 'c', { slot: 'emissive' } ) : undefined;
-
- var environment = this.environment ? this.environment.buildCode( builder, 'c', { cache: 'env', context: contextEnvironment, slot: 'environment' } ) : undefined;
-
- var clearCoatEnv = useClearCoat && environment ? this.environment.buildCode( builder, 'c', { cache: 'clearCoat', context: contextEnvironment, slot: 'environment' } ) : undefined;
-
- builder.requires.transparent = alpha !== undefined;
-
- builder.addParsCode( [
-
- "varying vec3 vViewPosition;",
-
- "#ifndef FLAT_SHADED",
-
- " varying vec3 vNormal;",
-
- "#endif",
-
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include "
- ].join( "\n" ) );
-
- var output = [
- "#include ",
-
- // add before: prevent undeclared normal
- " #include ",
-
- // add before: prevent undeclared material
- " PhysicalMaterial material;",
- " material.diffuseColor = vec3( 1.0 );"
- ];
-
- if ( mask ) {
-
- output.push(
- mask.code,
- 'if ( ! ' + mask.result + ' ) discard;'
- );
-
- }
-
- output.push(
- color.code,
- " vec3 diffuseColor = " + color.result + ";",
- " ReflectedLight reflectedLight = ReflectedLight( vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ), vec3( 0.0 ) );",
-
- "#include ",
-
- roughness.code,
- " float roughnessFactor = " + roughness.result + ";",
-
- metalness.code,
- " float metalnessFactor = " + metalness.result + ";"
- );
-
- if ( alpha ) {
-
- output.push(
- alpha.code,
- '#ifdef ALPHATEST',
-
- ' if ( ' + alpha.result + ' <= ALPHATEST ) discard;',
-
- '#endif'
- );
-
- }
-
- if ( normal ) {
-
- output.push(
- normal.code,
- 'normal = ' + normal.result + ';'
- );
-
- }
-
- // optimization for now
-
- output.push(
- 'material.diffuseColor = ' + ( light ? 'vec3( 1.0 )' : 'diffuseColor * (1.0 - metalnessFactor)' ) + ';',
- 'material.specularRoughness = clamp( roughnessFactor, 0.04, 1.0 );'
- );
-
- if ( clearCoat ) {
-
- output.push(
- clearCoat.code,
- 'material.clearCoat = saturate( ' + clearCoat.result + ' );'
- );
-
- } else if ( useClearCoat ) {
-
- output.push( 'material.clearCoat = 0.0;' );
-
- }
-
- if ( clearCoatRoughness ) {
-
- output.push(
- clearCoatRoughness.code,
- 'material.clearCoatRoughness = clamp( ' + clearCoatRoughness.result + ', 0.04, 1.0 );'
- );
-
- } else if ( useClearCoat ) {
-
- output.push( 'material.clearCoatRoughness = 0.0;' );
-
- }
-
- if ( reflectivity ) {
-
- output.push(
- reflectivity.code,
- 'material.specularColor = mix( vec3( MAXIMUM_SPECULAR_COEFFICIENT * pow2( ' + reflectivity.result + ' ) ), diffuseColor, metalnessFactor );'
- );
-
- } else {
-
- output.push(
- 'material.specularColor = mix( vec3( DEFAULT_SPECULAR_COEFFICIENT ), diffuseColor, metalnessFactor );'
- );
-
- }
-
- output.push(
- "#include "
- );
-
- if ( light ) {
-
- output.push(
- light.code,
- "reflectedLight.directDiffuse = " + light.result + ";"
- );
-
- // apply color
-
- output.push(
- "diffuseColor *= 1.0 - metalnessFactor;",
-
- "reflectedLight.directDiffuse *= diffuseColor;",
- "reflectedLight.indirectDiffuse *= diffuseColor;"
- );
-
- }
-
- if ( ao ) {
-
- output.push(
- ao.code,
- "reflectedLight.indirectDiffuse *= " + ao.result + ";",
- "float dotNV = saturate( dot( geometry.normal, geometry.viewDir ) );",
- "reflectedLight.indirectSpecular *= computeSpecularOcclusion( dotNV, " + ao.result + ", material.specularRoughness );"
- );
-
- }
-
- if ( ambient ) {
-
- output.push(
- ambient.code,
- "reflectedLight.indirectDiffuse += " + ambient.result + ";"
- );
-
- }
-
- if ( shadow ) {
-
- output.push(
- shadow.code,
- "reflectedLight.directDiffuse *= " + shadow.result + ";",
- "reflectedLight.directSpecular *= " + shadow.result + ";"
- );
-
- }
-
- if ( emissive ) {
-
- output.push(
- emissive.code,
- "reflectedLight.directDiffuse += " + emissive.result + ";"
- );
-
- }
-
- if ( environment ) {
-
- output.push( environment.code );
-
- if ( clearCoatEnv ) {
-
- output.push(
- clearCoatEnv.code,
- "clearCoatRadiance += " + clearCoatEnv.result + ";"
- );
-
- }
-
- output.push( "radiance += " + environment.result + ";" );
-
- }
-
- output.push(
- "#include "
- );
-
- output.push( "vec3 outgoingLight = reflectedLight.directDiffuse + reflectedLight.indirectDiffuse + reflectedLight.directSpecular + reflectedLight.indirectSpecular;" );
-
- if ( alpha ) {
-
- output.push( "gl_FragColor = vec4( outgoingLight, " + alpha.result + " );" );
-
- } else {
-
- output.push( "gl_FragColor = vec4( outgoingLight, 1.0 );" );
-
- }
-
- output.push(
- "#include ",
- "#include ",
- "#include ",
- "#include ",
- "#include "
- );
-
- code = output.join( "\n" );
-
- }
-
- return code;
-
-};
-
-StandardNode.prototype.copy = function ( source ) {
-
- Node.prototype.copy.call( this, source );
-
- // vertex
-
- if ( source.position ) this.position = source.position;
-
- // fragment
-
- this.color = source.color;
- this.roughness = source.roughness;
- this.metalness = source.metalness;
-
- if ( source.mask ) this.mask = source.mask;
-
- if ( source.alpha ) this.alpha = source.alpha;
-
- if ( source.normal ) this.normal = source.normal;
-
- if ( source.clearCoat ) this.clearCoat = source.clearCoat;
- if ( source.clearCoatRoughness ) this.clearCoatRoughness = source.clearCoatRoughness;
-
- if ( source.reflectivity ) this.reflectivity = source.reflectivity;
-
- if ( source.light ) this.light = source.light;
- if ( source.shadow ) this.shadow = source.shadow;
-
- if ( source.ao ) this.ao = source.ao;
-
- if ( source.emissive ) this.emissive = source.emissive;
- if ( source.ambient ) this.ambient = source.ambient;
-
- if ( source.environment ) this.environment = source.environment;
-
-};
-
-StandardNode.prototype.toJSON = function ( meta ) {
-
- var data = this.getJSONNode( meta );
-
- if ( ! data ) {
-
- data = this.createJSONNode( meta );
-
- // vertex
-
- if ( this.position ) data.position = this.position.toJSON( meta ).uuid;
-
- // fragment
-
- data.color = this.color.toJSON( meta ).uuid;
- data.roughness = this.roughness.toJSON( meta ).uuid;
- data.metalness = this.metalness.toJSON( meta ).uuid;
-
- if ( this.mask ) data.mask = this.mask.toJSON( meta ).uuid;
-
- if ( this.alpha ) data.alpha = this.alpha.toJSON( meta ).uuid;
-
- if ( this.normal ) data.normal = this.normal.toJSON( meta ).uuid;
-
- if ( this.clearCoat ) data.clearCoat = this.clearCoat.toJSON( meta ).uuid;
- if ( this.clearCoatRoughness ) data.clearCoatRoughness = this.clearCoatRoughness.toJSON( meta ).uuid;
-
- if ( this.reflectivity ) data.reflectivity = this.reflectivity.toJSON( meta ).uuid;
-
- if ( this.light ) data.light = this.light.toJSON( meta ).uuid;
- if ( this.shadow ) data.shadow = this.shadow.toJSON( meta ).uuid;
-
- if ( this.ao ) data.ao = this.ao.toJSON( meta ).uuid;
-
- if ( this.emissive ) data.emissive = this.emissive.toJSON( meta ).uuid;
- if ( this.ambient ) data.ambient = this.ambient.toJSON( meta ).uuid;
-
- if ( this.environment ) data.environment = this.environment.toJSON( meta ).uuid;
-
- }
-
- return data;
-
-};
-
-export { StandardNode };
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/constants.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/constants.d.ts
deleted file mode 100644
index 736669f945def6c65353ce98085662cb54dea6ef..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/constants.d.ts
+++ /dev/null
@@ -1,235 +0,0 @@
-export const REVISION: string;
-
-// https://developer.mozilla.org/en-US/docs/Web/API/MouseEvent.button
-export enum MOUSE {
- LEFT,
- MIDDLE,
- RIGHT,
-}
-
-// GL STATE CONSTANTS
-export enum CullFace {}
-export const CullFaceNone: CullFace;
-export const CullFaceBack: CullFace;
-export const CullFaceFront: CullFace;
-export const CullFaceFrontBack: CullFace;
-
-export enum FrontFaceDirection {}
-export const FrontFaceDirectionCW: FrontFaceDirection;
-export const FrontFaceDirectionCCW: FrontFaceDirection;
-
-// Shadowing Type
-export enum ShadowMapType {}
-export const BasicShadowMap: ShadowMapType;
-export const PCFShadowMap: ShadowMapType;
-export const PCFSoftShadowMap: ShadowMapType;
-
-// MATERIAL CONSTANTS
-
-// side
-export enum Side {}
-export const FrontSide: Side;
-export const BackSide: Side;
-export const DoubleSide: Side;
-
-// shading
-export enum Shading {}
-export const FlatShading: Shading;
-export const SmoothShading: Shading;
-
-// colors
-export enum Colors {}
-export const NoColors: Colors;
-export const FaceColors: Colors;
-export const VertexColors: Colors;
-
-// blending modes
-export enum Blending {}
-export const NoBlending: Blending;
-export const NormalBlending: Blending;
-export const AdditiveBlending: Blending;
-export const SubtractiveBlending: Blending;
-export const MultiplyBlending: Blending;
-export const CustomBlending: Blending;
-
-// custom blending equations
-// (numbers start from 100 not to clash with other
-// mappings to OpenGL constants defined in Texture.js)
-export enum BlendingEquation {}
-export const AddEquation: BlendingEquation;
-export const SubtractEquation: BlendingEquation;
-export const ReverseSubtractEquation: BlendingEquation;
-export const MinEquation: BlendingEquation;
-export const MaxEquation: BlendingEquation;
-
-// custom blending destination factors
-export enum BlendingDstFactor {}
-export const ZeroFactor: BlendingDstFactor;
-export const OneFactor: BlendingDstFactor;
-export const SrcColorFactor: BlendingDstFactor;
-export const OneMinusSrcColorFactor: BlendingDstFactor;
-export const SrcAlphaFactor: BlendingDstFactor;
-export const OneMinusSrcAlphaFactor: BlendingDstFactor;
-export const DstAlphaFactor: BlendingDstFactor;
-export const OneMinusDstAlphaFactor: BlendingDstFactor;
-export const DstColorFactor: BlendingDstFactor;
-export const OneMinusDstColorFactor: BlendingDstFactor;
-
-// custom blending src factors
-export enum BlendingSrcFactor {}
-export const SrcAlphaSaturateFactor: BlendingSrcFactor;
-
-// depth modes
-export enum DepthModes {}
-export const NeverDepth: DepthModes;
-export const AlwaysDepth: DepthModes;
-export const LessDepth: DepthModes;
-export const LessEqualDepth: DepthModes;
-export const EqualDepth: DepthModes;
-export const GreaterEqualDepth: DepthModes;
-export const GreaterDepth: DepthModes;
-export const NotEqualDepth: DepthModes;
-
-// TEXTURE CONSTANTS
-// Operations
-export enum Combine {}
-export const MultiplyOperation: Combine;
-export const MixOperation: Combine;
-export const AddOperation: Combine;
-
-// Tone Mapping modes
-export enum ToneMapping {}
-export const NoToneMapping: ToneMapping;
-export const LinearToneMapping: ToneMapping;
-export const ReinhardToneMapping: ToneMapping;
-export const Uncharted2ToneMapping: ToneMapping;
-export const CineonToneMapping: ToneMapping;
-
-// Mapping modes
-export enum Mapping {}
-export const UVMapping: Mapping;
-export const CubeReflectionMapping: Mapping;
-export const CubeRefractionMapping: Mapping;
-export const EquirectangularReflectionMapping: Mapping;
-export const EquirectangularRefractionMapping: Mapping;
-export const SphericalReflectionMapping: Mapping;
-export const CubeUVReflectionMapping: Mapping;
-export const CubeUVRefractionMapping: Mapping;
-
-// Wrapping modes
-export enum Wrapping {}
-export const RepeatWrapping: Wrapping;
-export const ClampToEdgeWrapping: Wrapping;
-export const MirroredRepeatWrapping: Wrapping;
-
-// Filters
-export enum TextureFilter {}
-export const NearestFilter: TextureFilter;
-export const NearestMipMapNearestFilter: TextureFilter;
-export const NearestMipMapLinearFilter: TextureFilter;
-export const LinearFilter: TextureFilter;
-export const LinearMipMapNearestFilter: TextureFilter;
-export const LinearMipMapLinearFilter: TextureFilter;
-
-// Data types
-export enum TextureDataType {}
-export const UnsignedByteType: TextureDataType;
-export const ByteType: TextureDataType;
-export const ShortType: TextureDataType;
-export const UnsignedShortType: TextureDataType;
-export const IntType: TextureDataType;
-export const UnsignedIntType: TextureDataType;
-export const FloatType: TextureDataType;
-export const HalfFloatType: TextureDataType;
-
-// Pixel types
-export enum PixelType {}
-export const UnsignedShort4444Type: PixelType;
-export const UnsignedShort5551Type: PixelType;
-export const UnsignedShort565Type: PixelType;
-export const UnsignedInt248Type: PixelType;
-
-// Pixel formats
-export enum PixelFormat {}
-export const AlphaFormat: PixelFormat;
-export const RGBFormat: PixelFormat;
-export const RGBAFormat: PixelFormat;
-export const LuminanceFormat: PixelFormat;
-export const LuminanceAlphaFormat: PixelFormat;
-export const RGBEFormat: PixelFormat;
-export const DepthFormat: PixelFormat;
-export const DepthStencilFormat: PixelFormat;
-export const RedFormat: PixelFormat;
-
-// Compressed texture formats
-// DDS / ST3C Compressed texture formats
-export enum CompressedPixelFormat {}
-export const RGB_S3TC_DXT1_Format: CompressedPixelFormat;
-export const RGBA_S3TC_DXT1_Format: CompressedPixelFormat;
-export const RGBA_S3TC_DXT3_Format: CompressedPixelFormat;
-export const RGBA_S3TC_DXT5_Format: CompressedPixelFormat;
-
-// PVRTC compressed texture formats
-export const RGB_PVRTC_4BPPV1_Format: CompressedPixelFormat;
-export const RGB_PVRTC_2BPPV1_Format: CompressedPixelFormat;
-export const RGBA_PVRTC_4BPPV1_Format: CompressedPixelFormat;
-export const RGBA_PVRTC_2BPPV1_Format: CompressedPixelFormat;
-
-// ETC compressed texture formats
-export const RGB_ETC1_Format: CompressedPixelFormat;
-
-// ASTC compressed texture formats
-export const RGBA_ASTC_4x4_Format: CompressedPixelFormat;
-export const RGBA_ASTC_5x4_Format: CompressedPixelFormat;
-export const RGBA_ASTC_5x5_Format: CompressedPixelFormat;
-export const RGBA_ASTC_6x5_Format: CompressedPixelFormat;
-export const RGBA_ASTC_6x6_Format: CompressedPixelFormat;
-export const RGBA_ASTC_8x5_Format: CompressedPixelFormat;
-export const RGBA_ASTC_8x6_Format: CompressedPixelFormat;
-export const RGBA_ASTC_8x8_Format: CompressedPixelFormat;
-export const RGBA_ASTC_10x5_Format: CompressedPixelFormat;
-export const RGBA_ASTC_10x6_Format: CompressedPixelFormat;
-export const RGBA_ASTC_10x8_Format: CompressedPixelFormat;
-export const RGBA_ASTC_10x10_Format: CompressedPixelFormat;
-export const RGBA_ASTC_12x10_Format: CompressedPixelFormat;
-export const RGBA_ASTC_12x12_Format: CompressedPixelFormat;
-
-// Loop styles for AnimationAction
-export enum AnimationActionLoopStyles {}
-export const LoopOnce: AnimationActionLoopStyles;
-export const LoopRepeat: AnimationActionLoopStyles;
-export const LoopPingPong: AnimationActionLoopStyles;
-
-// Interpolation
-export enum InterpolationModes {}
-export const InterpolateDiscrete: InterpolationModes;
-export const InterpolateLinear: InterpolationModes;
-export const InterpolateSmooth: InterpolationModes;
-
-// Interpolant ending modes
-export enum InterpolationEndingModes {}
-export const ZeroCurvatureEnding: InterpolationEndingModes;
-export const ZeroSlopeEnding: InterpolationEndingModes;
-export const WrapAroundEnding: InterpolationEndingModes;
-
-// Triangle Draw modes
-export enum TrianglesDrawModes {}
-export const TrianglesDrawMode: TrianglesDrawModes;
-export const TriangleStripDrawMode: TrianglesDrawModes;
-export const TriangleFanDrawMode: TrianglesDrawModes;
-
-// Texture Encodings
-export enum TextureEncoding {}
-export const LinearEncoding: TextureEncoding;
-export const sRGBEncoding: TextureEncoding;
-export const GammaEncoding: TextureEncoding;
-export const RGBEEncoding: TextureEncoding;
-export const LogLuvEncoding: TextureEncoding;
-export const RGBM7Encoding: TextureEncoding;
-export const RGBM16Encoding: TextureEncoding;
-export const RGBDEncoding: TextureEncoding;
-
-// Depth packing strategies
-export enum DepthPackingStrategies {}
-export const BasicDepthPacking: DepthPackingStrategies;
-export const RGBADepthPacking: DepthPackingStrategies;
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/loaders/Cache.js b/spaces/banana-projects/web3d/node_modules/three/src/loaders/Cache.js
deleted file mode 100644
index 9f63c8dfc67a7fc2f1da7e3a74ae074cb8674520..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/loaders/Cache.js
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * @author mrdoob / http://mrdoob.com/
- */
-
-var Cache = {
-
- enabled: false,
-
- files: {},
-
- add: function ( key, file ) {
-
- if ( this.enabled === false ) return;
-
- // console.log( 'THREE.Cache', 'Adding key:', key );
-
- this.files[ key ] = file;
-
- },
-
- get: function ( key ) {
-
- if ( this.enabled === false ) return;
-
- // console.log( 'THREE.Cache', 'Checking key:', key );
-
- return this.files[ key ];
-
- },
-
- remove: function ( key ) {
-
- delete this.files[ key ];
-
- },
-
- clear: function () {
-
- this.files = {};
-
- }
-
-};
-
-
-export { Cache };
diff --git a/spaces/bigjoker/stable-diffusion-webui/scripts/outpainting_mk_2.py b/spaces/bigjoker/stable-diffusion-webui/scripts/outpainting_mk_2.py
deleted file mode 100644
index 5d80b46cd3263ef0905514a761bb473441d8a1e7..0000000000000000000000000000000000000000
--- a/spaces/bigjoker/stable-diffusion-webui/scripts/outpainting_mk_2.py
+++ /dev/null
@@ -1,283 +0,0 @@
-import math
-
-import numpy as np
-import skimage
-
-import modules.scripts as scripts
-import gradio as gr
-from PIL import Image, ImageDraw
-
-from modules import images, processing, devices
-from modules.processing import Processed, process_images
-from modules.shared import opts, cmd_opts, state
-
-
-# this function is taken from https://github.com/parlance-zz/g-diffuser-bot
-def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05):
- # helper fft routines that keep ortho normalization and auto-shift before and after fft
- def _fft2(data):
- if data.ndim > 2: # has channels
- out_fft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
- for c in range(data.shape[2]):
- c_data = data[:, :, c]
- out_fft[:, :, c] = np.fft.fft2(np.fft.fftshift(c_data), norm="ortho")
- out_fft[:, :, c] = np.fft.ifftshift(out_fft[:, :, c])
- else: # one channel
- out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
- out_fft[:, :] = np.fft.fft2(np.fft.fftshift(data), norm="ortho")
- out_fft[:, :] = np.fft.ifftshift(out_fft[:, :])
-
- return out_fft
-
- def _ifft2(data):
- if data.ndim > 2: # has channels
- out_ifft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
- for c in range(data.shape[2]):
- c_data = data[:, :, c]
- out_ifft[:, :, c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho")
- out_ifft[:, :, c] = np.fft.ifftshift(out_ifft[:, :, c])
- else: # one channel
- out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
- out_ifft[:, :] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho")
- out_ifft[:, :] = np.fft.ifftshift(out_ifft[:, :])
-
- return out_ifft
-
- def _get_gaussian_window(width, height, std=3.14, mode=0):
- window_scale_x = float(width / min(width, height))
- window_scale_y = float(height / min(width, height))
-
- window = np.zeros((width, height))
- x = (np.arange(width) / width * 2. - 1.) * window_scale_x
- for y in range(height):
- fy = (y / height * 2. - 1.) * window_scale_y
- if mode == 0:
- window[:, y] = np.exp(-(x ** 2 + fy ** 2) * std)
- else:
- window[:, y] = (1 / ((x ** 2 + 1.) * (fy ** 2 + 1.))) ** (std / 3.14) # hey wait a minute that's not gaussian
-
- return window
-
- def _get_masked_window_rgb(np_mask_grey, hardness=1.):
- np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3))
- if hardness != 1.:
- hardened = np_mask_grey[:] ** hardness
- else:
- hardened = np_mask_grey[:]
- for c in range(3):
- np_mask_rgb[:, :, c] = hardened[:]
- return np_mask_rgb
-
- width = _np_src_image.shape[0]
- height = _np_src_image.shape[1]
- num_channels = _np_src_image.shape[2]
-
- np_src_image = _np_src_image[:] * (1. - np_mask_rgb)
- np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.)
- img_mask = np_mask_grey > 1e-6
- ref_mask = np_mask_grey < 1e-3
-
- windowed_image = _np_src_image * (1. - _get_masked_window_rgb(np_mask_grey))
- windowed_image /= np.max(windowed_image)
- windowed_image += np.average(_np_src_image) * np_mask_rgb # / (1.-np.average(np_mask_rgb)) # rather than leave the masked area black, we get better results from fft by filling the average unmasked color
-
- src_fft = _fft2(windowed_image) # get feature statistics from masked src img
- src_dist = np.absolute(src_fft)
- src_phase = src_fft / src_dist
-
- # create a generator with a static seed to make outpainting deterministic / only follow global seed
- rng = np.random.default_rng(0)
-
- noise_window = _get_gaussian_window(width, height, mode=1) # start with simple gaussian noise
- noise_rgb = rng.random((width, height, num_channels))
- noise_grey = (np.sum(noise_rgb, axis=2) / 3.)
- noise_rgb *= color_variation # the colorfulness of the starting noise is blended to greyscale with a parameter
- for c in range(num_channels):
- noise_rgb[:, :, c] += (1. - color_variation) * noise_grey
-
- noise_fft = _fft2(noise_rgb)
- for c in range(num_channels):
- noise_fft[:, :, c] *= noise_window
- noise_rgb = np.real(_ifft2(noise_fft))
- shaped_noise_fft = _fft2(noise_rgb)
- shaped_noise_fft[:, :, :] = np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist ** noise_q) * src_phase # perform the actual shaping
-
- brightness_variation = 0. # color_variation # todo: temporarily tying brightness variation to color variation for now
- contrast_adjusted_np_src = _np_src_image[:] * (brightness_variation + 1.) - brightness_variation * 2.
-
- # scikit-image is used for histogram matching, very convenient!
- shaped_noise = np.real(_ifft2(shaped_noise_fft))
- shaped_noise -= np.min(shaped_noise)
- shaped_noise /= np.max(shaped_noise)
- shaped_noise[img_mask, :] = skimage.exposure.match_histograms(shaped_noise[img_mask, :] ** 1., contrast_adjusted_np_src[ref_mask, :], channel_axis=1)
- shaped_noise = _np_src_image[:] * (1. - np_mask_rgb) + shaped_noise * np_mask_rgb
-
- matched_noise = shaped_noise[:]
-
- return np.clip(matched_noise, 0., 1.)
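-
-# Hedged usage sketch (hypothetical inputs): both arguments are float RGB arrays in [0, 1], and
-# np_mask_rgb is ~1.0 where new content should be generated and ~0.0 over the pixels to keep.
-#   np_image = np.asarray(pil_image, dtype=np.float64) / 255.0    # pil_image: a PIL.Image loaded elsewhere
-#   np_mask = np.zeros_like(np_image); np_mask[:, -64:, :] = 1.0  # outpaint a 64-pixel strip on the right
-#   init_fill = get_matched_noise(np_image, np_mask, noise_q=1.0, color_variation=0.05)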
-
-
-
-class Script(scripts.Script):
- def title(self):
- return "Outpainting mk2"
-
- def show(self, is_img2img):
- return is_img2img
-
- def ui(self, is_img2img):
- if not is_img2img:
- return None
-
- info = gr.HTML("<p style=\"margin-bottom:0.75em\">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>")
diff --git a/spaces/bioriAsaeru/text-to-voice/Ek Daav Dhobi Pachad Full Marathi Movie Download.md b/spaces/bioriAsaeru/text-to-voice/Ek Daav Dhobi Pachad Full Marathi Movie Download.md
deleted file mode 100644
index d968bfe8c0ff0a6af7ba5c376ba54a9efa285ebc..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Ek Daav Dhobi Pachad Full Marathi Movie Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
How to Install Generic 28c-1 Printer Driver on Your PC
-
Generic 28c-1 is a printer driver that supports various models of color multifunctional devices from DEVELOP and Konica Minolta. It allows you to print, scan, fax and copy documents with ease and efficiency. In this article, we will show you how to download and install the Generic 28c-1 printer driver on your PC.
-
Step 1: Download the driver
-
The first step is to download the driver from the official website of DEVELOP or Konica Minolta. Depending on your device model and operating system, you can choose from different versions of the driver, such as PCL6, PostScript, XPS, PC-Fax, etc. You can also use the Automatic Driver Installer or the PPD for Applications if you have multiple products on your network and do not wish to install each separate driver.
Alternatively, you can use the search function on the websites to find the driver for your specific device model.
-
Step 2: Install the driver
-
The second step is to install the driver on your PC. The installation process may vary depending on the version of the driver you downloaded, but generally it involves the following steps:
-
-
Extract the downloaded file to a folder on your PC.
-
Run the Setup.exe or UPDSetup.exe file from the folder.
-
Follow the instructions on the screen to complete the installation.
-
Restart your PC if prompted.
-
-
If you are using the Automatic Driver Installer or the PPD for Applications, you may need to select your device model and connection type during the installation.
-
If you are using the PC-Fax driver, you may need to configure your fax settings after the installation.
-
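If you prefer the command line, recent versions of Windows can also stage and install an INF-based driver with the built-in pnputil tool. Treat the following as a sketch only: run it from an elevated Command Prompt and replace the folder path with wherever you extracted the driver package.
-
pnputil /add-driver "C:\Drivers\Generic28c1\*.inf" /install
-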
Step 3: Enjoy your printer
-
The third step is to enjoy your printer. You can now print, scan, fax and copy documents with your device using the Generic 28c-1 printer driver. You can also access various features and settings of your device through the printer properties or preferences menu on your PC.
-
If you encounter any problems or have any questions about the driver, you can contact the support team of DEVELOP or Konica Minolta for assistance.
Step 4: Update the driver
-
The fourth step is to update the driver regularly to ensure optimal performance and compatibility. You can check for updates on the official website of DEVELOP or Konica Minolta, or use the Automatic Update function on your PC if available. Updating the driver can fix some bugs, improve some features, and add support for new devices or operating systems.
-
-
To update the driver manually, you can follow these steps:
-
-
Download the latest version of the driver from the website.
-
Uninstall the current version of the driver from your PC.
-
Install the new version of the driver following the same steps as before.
-
Restart your PC if prompted.
-
-
To update the driver automatically, you can follow these steps:
-
-
Open the printer properties or preferences menu on your PC.
-
Select the Automatic Update tab or option.
-
Enable the Automatic Update function and set the frequency and time of checking for updates.
-
Save the settings and close the menu.
-
-
The driver will automatically check for updates and install them when available.
-
Conclusion
-
In this article, we have shown you how to download and install the Generic 28c-1 printer driver on your PC. We have also explained how to update the driver regularly to ensure optimal performance and compatibility. We hope this article has been helpful and informative for you. If you have any feedback or suggestions, please let us know in the comments below. Thank you for reading!
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/boda/arabic-names-generator/model/layers.py b/spaces/boda/arabic-names-generator/model/layers.py
deleted file mode 100644
index b2e48415481ae6dd6c582bcfcefe74937b35aa45..0000000000000000000000000000000000000000
--- a/spaces/boda/arabic-names-generator/model/layers.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import torch
-
-
-
-class Linear():
- def __init__(self, in_n, out_n, bias=True) -> None:
- self.params = []
- self.have_bias = bias
- self.weight = torch.randn((in_n,out_n)) / (in_n**0.5)
- self.params.append(self.weight)
-
- self.bias = None
- if self.have_bias:
- self.bias = torch.zeros(out_n)
- self.params.append(self.bias)
-
- def __call__(self,x, is_training =True):
- self.is_training = is_training
-
- self.out = x @ self.params[0]
- if self.have_bias:
- self.out += self.params[1]
- return self.out
-
- def set_parameters(self,p):
- self.params = p
- # self.weight = p[0]
- # self.bias = p[1]
- # self.params = [p]
-
- def parameters(self):
- return self.params
-
-
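-# Hedged usage sketch (hypothetical sizes): the classes in this file mirror a torch.nn-style API,
-# so a small MLP can be composed as a plain list of layers.
-#   net = [Linear(30, 200), BatchNorm(200), Activation('tanh'), Linear(200, 27)]
-#   x = torch.randn(32, 30)
-#   for layer in net:
-#       x = layer(x, is_training=True)   # final x: logits of shape (32, 27)
-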
-class BatchNorm():
- def __init__(self, in_n,eps=1e-5, momentum = 0.1) -> None:
- self.eps = eps
- self.is_training = True
- self.momentum = momentum
- self.running_mean = torch.zeros(in_n)
- self.running_std = torch.ones(in_n)
- self.gain = torch.ones(in_n)
- self.bias = torch.zeros(in_n)
- self.params = [self.gain , self.bias]
-
-
- def __call__(self, x, is_training= True):
-
- self.is_training = is_training
- if self.is_training:
- mean = x.mean(0,keepdims= True)
- # torch.std uses the unbiased (Bessel-corrected) estimator by default, which is fine for batch statistics here
- std = x.std(0,keepdims= True)
-
- self.out = self.params[0] * ((x - mean) / (std + self.eps**0.5)) + self.params[1]
-
- with torch.no_grad():
- self.running_mean = self.running_mean * (1- self.momentum) \
- + self.momentum * mean
- self.running_std = self.running_std * (1- self.momentum) \
- + self.momentum * std
-
- else:
- # print(self.running_mean , self.running_std)
- self.out = self.params[0] * ((x - self.running_mean) / (self.running_std + self.eps**0.5)) + self.params[1]
-
-
-
- return self.out
-
- def set_parameters(self,p):
- self.params = p
- # self.gain = p[0]
- # self.bias = p[1]
- # self.params = [self.gain , self.bias]
- def set_mean_std(self, conf):
- self.running_mean = conf[0]
- self.running_std = conf[1]
- def get_mean_std(self):
- return [self.running_mean, self.running_std]
-
- def parameters(self):
- return self.params
-
-class Activation():
- def __init__(self, activation='tanh'):
- self.params = []
- if activation == 'tanh':
- self.forward = self._forward_tanh
- elif activation == 'relu':
- self.forward = self._forward_relu
- else:
- raise Exception('Only tanh, and relu activations are supported')
-
- def _forward_relu(self,x):
- return torch.relu(x)
- def _forward_tanh(self,x):
- return torch.tanh(x)
-
- def __call__(self, x, is_training= True):
-
- self.is_training = is_training
-
-
- self.out = self.forward(x)
- return self.out
-
- def set_parameters(self,p):
- self.params = p
- def parameters(self):
- return self.params
\ No newline at end of file
diff --git a/spaces/candlend/vits-hoshimi/sovits/hubert_model.py b/spaces/candlend/vits-hoshimi/sovits/hubert_model.py
deleted file mode 100644
index c3305041e32aef1a31ceaf566d6ce79711459352..0000000000000000000000000000000000000000
--- a/spaces/candlend/vits-hoshimi/sovits/hubert_model.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import copy
-import random
-from typing import Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as t_func
-from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
-
-
-class Hubert(nn.Module):
- def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
- super().__init__()
- self._mask = mask
- self.feature_extractor = FeatureExtractor()
- self.feature_projection = FeatureProjection()
- self.positional_embedding = PositionalConvEmbedding()
- self.norm = nn.LayerNorm(768)
- self.dropout = nn.Dropout(0.1)
- self.encoder = TransformerEncoder(
- nn.TransformerEncoderLayer(
- 768, 12, 3072, activation="gelu", batch_first=True
- ),
- 12,
- )
- self.proj = nn.Linear(768, 256)
-
- self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
- self.label_embedding = nn.Embedding(num_label_embeddings, 256)
-
- def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- mask = None
- if self.training and self._mask:
- mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
- x[mask] = self.masked_spec_embed.to(x.dtype)
- return x, mask
-
- def encode(
- self, x: torch.Tensor, layer: Optional[int] = None
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- x = self.feature_extractor(x)
- x = self.feature_projection(x.transpose(1, 2))
- x, mask = self.mask(x)
- x = x + self.positional_embedding(x)
- x = self.dropout(self.norm(x))
- x = self.encoder(x, output_layer=layer)
- return x, mask
-
- def logits(self, x: torch.Tensor) -> torch.Tensor:
- logits = torch.cosine_similarity(
- x.unsqueeze(2),
- self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
- dim=-1,
- )
- return logits / 0.1
-
- def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
- x, mask = self.encode(x)
- x = self.proj(x)
- logits = self.logits(x)
- return logits, mask
-
-
-class HubertSoft(Hubert):
- def __init__(self):
- super().__init__()
-
- @torch.inference_mode()
- def units(self, wav: torch.Tensor) -> torch.Tensor:
- wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
- x, _ = self.encode(wav)
- return self.proj(x)
-
-
-class FeatureExtractor(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
- self.norm0 = nn.GroupNorm(512, 512)
- self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
- self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
- self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = t_func.gelu(self.norm0(self.conv0(x)))
- x = t_func.gelu(self.conv1(x))
- x = t_func.gelu(self.conv2(x))
- x = t_func.gelu(self.conv3(x))
- x = t_func.gelu(self.conv4(x))
- x = t_func.gelu(self.conv5(x))
- x = t_func.gelu(self.conv6(x))
- return x
-
-
-class FeatureProjection(nn.Module):
- def __init__(self):
- super().__init__()
- self.norm = nn.LayerNorm(512)
- self.projection = nn.Linear(512, 768)
- self.dropout = nn.Dropout(0.1)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.norm(x)
- x = self.projection(x)
- x = self.dropout(x)
- return x
-
-
-class PositionalConvEmbedding(nn.Module):
- def __init__(self):
- super().__init__()
- self.conv = nn.Conv1d(
- 768,
- 768,
- kernel_size=128,
- padding=128 // 2,
- groups=16,
- )
- self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.conv(x.transpose(1, 2))
- x = t_func.gelu(x[:, :, :-1])
- return x.transpose(1, 2)
-
-
-class TransformerEncoder(nn.Module):
- def __init__(
- self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
- ) -> None:
- super(TransformerEncoder, self).__init__()
- self.layers = nn.ModuleList(
- [copy.deepcopy(encoder_layer) for _ in range(num_layers)]
- )
- self.num_layers = num_layers
-
- def forward(
- self,
- src: torch.Tensor,
- mask: torch.Tensor = None,
- src_key_padding_mask: torch.Tensor = None,
- output_layer: Optional[int] = None,
- ) -> torch.Tensor:
- output = src
- for layer in self.layers[:output_layer]:
- output = layer(
- output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
- )
- return output
-
-
-def _compute_mask(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- device: torch.device,
- min_masks: int = 0,
-) -> torch.Tensor:
- batch_size, sequence_length = shape
-
- if mask_length < 1:
- raise ValueError("`mask_length` has to be bigger than 0.")
-
- if mask_length > sequence_length:
- raise ValueError(
- f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
- )
-
- # compute number of masked spans in batch
- num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
- num_masked_spans = max(num_masked_spans, min_masks)
-
- # make sure num masked indices <= sequence_length
- if num_masked_spans * mask_length > sequence_length:
- num_masked_spans = sequence_length // mask_length
-
- # SpecAugment mask to fill
- mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
-
- # uniform distribution to sample from, make sure that offset samples are < sequence_length
- uniform_dist = torch.ones(
- (batch_size, sequence_length - (mask_length - 1)), device=device
- )
-
- # get random indices to mask
- mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
-
- # expand masked indices to masked spans
- mask_indices = (
- mask_indices.unsqueeze(dim=-1)
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- offsets = (
- torch.arange(mask_length, device=device)[None, None, :]
- .expand((batch_size, num_masked_spans, mask_length))
- .reshape(batch_size, num_masked_spans * mask_length)
- )
- mask_idxs = mask_indices + offsets
-
- # scatter indices to mask
- mask = mask.scatter(1, mask_idxs, True)
-
- return mask
-
-
-def hubert_soft(
- path: str
-) -> HubertSoft:
- r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
- Args:
- path (str): path of a pretrained model
- """
- dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- # dev = torch.device("cpu")
- hubert = HubertSoft()
- checkpoint = torch.load(path)
- consume_prefix_in_state_dict_if_present(checkpoint, "module.")
- hubert.load_state_dict(checkpoint)
- hubert.eval().to(dev)
- return hubert
diff --git a/spaces/caojiachen1/ChatGPT/crazy_functions/test_project/latex/attention/introduction.tex b/spaces/caojiachen1/ChatGPT/crazy_functions/test_project/latex/attention/introduction.tex
deleted file mode 100644
index 1baa8915f4cf7aec2520894a87470fc9436d954b..0000000000000000000000000000000000000000
--- a/spaces/caojiachen1/ChatGPT/crazy_functions/test_project/latex/attention/introduction.tex
+++ /dev/null
@@ -1,18 +0,0 @@
-Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.
-
-Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
-%\marginpar{not sure if the memory constraints are understandable here}
-Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains.
-
-%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}
-
-Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.
-
-%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
-%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}
-
-In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
-%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}
-
-% Just a standard paragraph with citations, rewrite.
-%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.
\ No newline at end of file
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/query_db.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/query_db.py
deleted file mode 100644
index 8b2745ebbc4206441d8af6ac0bc4f1f74faf4d20..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/query_db.py
+++ /dev/null
@@ -1,250 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import argparse
-import logging
-import os
-import sys
-from timeit import default_timer as timer
-from typing import Any, ClassVar, Dict, List
-import torch
-
-from detectron2.data.catalog import DatasetCatalog
-from detectron2.utils.file_io import PathManager
-from detectron2.utils.logger import setup_logger
-
-from densepose.structures import DensePoseDataRelative
-from densepose.utils.dbhelper import EntrySelector
-from densepose.utils.logger import verbosity_to_level
-from densepose.vis.base import CompoundVisualizer
-from densepose.vis.bounding_box import BoundingBoxVisualizer
-from densepose.vis.densepose_data_points import (
- DensePoseDataCoarseSegmentationVisualizer,
- DensePoseDataPointsIVisualizer,
- DensePoseDataPointsUVisualizer,
- DensePoseDataPointsVisualizer,
- DensePoseDataPointsVVisualizer,
-)
-
-DOC = """Query DB - a tool to print / visualize data from a database
-"""
-
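-# Hedged usage examples (the dataset name, selector values and output file are hypothetical):
-#   python query_db.py print densepose_coco_2014_train "image_id:int=500-1000" --max-entries 5
-#   python query_db.py show densepose_coco_2014_train "image_id:int=500-1000" dp_segm,bbox --output vis.png
-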
-LOGGER_NAME = "query_db"
-
-logger = logging.getLogger(LOGGER_NAME)
-
-_ACTION_REGISTRY: Dict[str, "Action"] = {}
-
-
-class Action(object):
- @classmethod
- def add_arguments(cls: type, parser: argparse.ArgumentParser):
- parser.add_argument(
- "-v",
- "--verbosity",
- action="count",
- help="Verbose mode. Multiple -v options increase the verbosity.",
- )
-
-
-def register_action(cls: type):
- """
- Decorator for action classes to automate action registration
- """
- global _ACTION_REGISTRY
- _ACTION_REGISTRY[cls.COMMAND] = cls
- return cls
-
-
-class EntrywiseAction(Action):
- @classmethod
- def add_arguments(cls: type, parser: argparse.ArgumentParser):
- super(EntrywiseAction, cls).add_arguments(parser)
- parser.add_argument(
- "dataset", metavar="", help="Dataset name (e.g. densepose_coco_2014_train)"
- )
- parser.add_argument(
- "selector",
- metavar="",
- help="Dataset entry selector in the form field1[:type]=value1[,"
- "field2[:type]=value_min-value_max...] which selects all "
- "entries from the dataset that satisfy the constraints",
- )
- parser.add_argument(
- "--max-entries", metavar="N", help="Maximum number of entries to process", type=int
- )
-
- @classmethod
- def execute(cls: type, args: argparse.Namespace):
- dataset = setup_dataset(args.dataset)
- entry_selector = EntrySelector.from_string(args.selector)
- context = cls.create_context(args)
- if args.max_entries is not None:
- for _, entry in zip(range(args.max_entries), dataset):
- if entry_selector(entry):
- cls.execute_on_entry(entry, context)
- else:
- for entry in dataset:
- if entry_selector(entry):
- cls.execute_on_entry(entry, context)
-
- @classmethod
- def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
- context = {}
- return context
-
-
-@register_action
-class PrintAction(EntrywiseAction):
- """
- Print action that outputs selected entries to stdout
- """
-
- COMMAND: ClassVar[str] = "print"
-
- @classmethod
- def add_parser(cls: type, subparsers: argparse._SubParsersAction):
- parser = subparsers.add_parser(cls.COMMAND, help="Output selected entries to stdout. ")
- cls.add_arguments(parser)
- parser.set_defaults(func=cls.execute)
-
- @classmethod
- def add_arguments(cls: type, parser: argparse.ArgumentParser):
- super(PrintAction, cls).add_arguments(parser)
-
- @classmethod
- def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]):
- import pprint
-
- printer = pprint.PrettyPrinter(indent=2, width=200, compact=True)
- printer.pprint(entry)
-
-
-@register_action
-class ShowAction(EntrywiseAction):
- """
- Show action that visualizes selected entries on an image
- """
-
- COMMAND: ClassVar[str] = "show"
- VISUALIZERS: ClassVar[Dict[str, object]] = {
- "dp_segm": DensePoseDataCoarseSegmentationVisualizer(),
- "dp_i": DensePoseDataPointsIVisualizer(),
- "dp_u": DensePoseDataPointsUVisualizer(),
- "dp_v": DensePoseDataPointsVVisualizer(),
- "dp_pts": DensePoseDataPointsVisualizer(),
- "bbox": BoundingBoxVisualizer(),
- }
-
- @classmethod
- def add_parser(cls: type, subparsers: argparse._SubParsersAction):
- parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
- cls.add_arguments(parser)
- parser.set_defaults(func=cls.execute)
-
- @classmethod
- def add_arguments(cls: type, parser: argparse.ArgumentParser):
- super(ShowAction, cls).add_arguments(parser)
- parser.add_argument(
- "visualizations",
- metavar="",
- help="Comma separated list of visualizations, possible values: "
- "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))),
- )
- parser.add_argument(
- "--output",
- metavar="",
- default="output.png",
- help="File name to save output to",
- )
-
- @classmethod
- def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]):
- import cv2
- import numpy as np
-
- image_fpath = PathManager.get_local_path(entry["file_name"])
- image = cv2.imread(image_fpath, cv2.IMREAD_GRAYSCALE)
- image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
- datas = cls._extract_data_for_visualizers_from_entry(context["vis_specs"], entry)
- visualizer = context["visualizer"]
- image_vis = visualizer.visualize(image, datas)
- entry_idx = context["entry_idx"] + 1
- out_fname = cls._get_out_fname(entry_idx, context["out_fname"])
- cv2.imwrite(out_fname, image_vis)
- logger.info(f"Output saved to {out_fname}")
- context["entry_idx"] += 1
-
- @classmethod
- def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
- base, ext = os.path.splitext(fname_base)
- return base + ".{0:04d}".format(entry_idx) + ext
-
- @classmethod
- def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]:
- vis_specs = args.visualizations.split(",")
- visualizers = []
- for vis_spec in vis_specs:
- vis = cls.VISUALIZERS[vis_spec]
- visualizers.append(vis)
- context = {
- "vis_specs": vis_specs,
- "visualizer": CompoundVisualizer(visualizers),
- "out_fname": args.output,
- "entry_idx": 0,
- }
- return context
-
- @classmethod
- def _extract_data_for_visualizers_from_entry(
- cls: type, vis_specs: List[str], entry: Dict[str, Any]
- ):
- dp_list = []
- bbox_list = []
- for annotation in entry["annotations"]:
- is_valid, _ = DensePoseDataRelative.validate_annotation(annotation)
- if not is_valid:
- continue
- bbox = torch.as_tensor(annotation["bbox"])
- bbox_list.append(bbox)
- dp_data = DensePoseDataRelative(annotation)
- dp_list.append(dp_data)
- datas = []
- for vis_spec in vis_specs:
- datas.append(bbox_list if "bbox" == vis_spec else (bbox_list, dp_list))
- return datas
-
-
-def setup_dataset(dataset_name):
- logger.info("Loading dataset {}".format(dataset_name))
- start = timer()
- dataset = DatasetCatalog.get(dataset_name)
- stop = timer()
- logger.info("Loaded dataset {} in {:.3f}s".format(dataset_name, stop - start))
- return dataset
-
-
-def create_argument_parser() -> argparse.ArgumentParser:
- parser = argparse.ArgumentParser(
- description=DOC,
- formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120),
- )
- parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
- subparsers = parser.add_subparsers(title="Actions")
- for _, action in _ACTION_REGISTRY.items():
- action.add_parser(subparsers)
- return parser
-
-
-def main():
- parser = create_argument_parser()
- args = parser.parse_args()
- verbosity = args.verbosity if hasattr(args, "verbosity") else None
- global logger
- logger = setup_logger(name=LOGGER_NAME)
- logger.setLevel(verbosity_to_level(verbosity))
- args.func(args)
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/ccarr0807/HuggingGPT/models_server.py b/spaces/ccarr0807/HuggingGPT/models_server.py
deleted file mode 100644
index 034d221883220c053a0cb445ffef403b232fcdee..0000000000000000000000000000000000000000
--- a/spaces/ccarr0807/HuggingGPT/models_server.py
+++ /dev/null
@@ -1,618 +0,0 @@
-import argparse
-import logging
-import random
-import uuid
-import numpy as np
-from transformers import pipeline
-from diffusers import DiffusionPipeline, StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
-from diffusers.utils import load_image
-from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
-from diffusers.utils import export_to_video
-from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5ForSpeechToSpeech
-from transformers import BlipProcessor, BlipForConditionalGeneration
-from transformers import TrOCRProcessor, VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
-from datasets import load_dataset
-from PIL import Image
-import io
-from torchvision import transforms
-import torch
-import torchaudio
-from speechbrain.pretrained import WaveformEnhancement
-import joblib
-from huggingface_hub import hf_hub_url, cached_download
-from transformers import AutoImageProcessor, TimesformerForVideoClassification
-from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation, AutoFeatureExtractor
-from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector, CannyDetector, MidasDetector
-from controlnet_aux.open_pose.body import Body
-from controlnet_aux.mlsd.models.mbv2_mlsd_large import MobileV2_MLSD_Large
-from controlnet_aux.hed import Network
-from transformers import DPTForDepthEstimation, DPTFeatureExtractor
-import warnings
-import time
-from espnet2.bin.tts_inference import Text2Speech
-import soundfile as sf
-from asteroid.models import BaseModel
-import traceback
-import os
-import yaml
-
-warnings.filterwarnings("ignore")
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--config", type=str, default="config.yaml")
-args = parser.parse_args()
-
-if __name__ != "__main__":
- args.config = "config.gradio.yaml"
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
-handler = logging.StreamHandler()
-handler.setLevel(logging.INFO)
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-handler.setFormatter(formatter)
-logger.addHandler(handler)
-
-config = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader)
-
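-# config.yaml is expected to provide at least the keys read below:
-# "inference_mode", "local_deployment" and "proxy".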
-local_deployment = config["local_deployment"]
-if config["inference_mode"] == "huggingface":
- local_deployment = "none"
-
-PROXY = None
-if config["proxy"]:
- PROXY = {
- "https": config["proxy"],
- }
-
-start = time.time()
-
-# local_models = "models/"
-local_models = ""
-
-
-def load_pipes(local_deployment):
- other_pipes = {}
- standard_pipes = {}
- controlnet_sd_pipes = {}
- if local_deployment in ["full"]:
- other_pipes = {
-
- # "Salesforce/blip-image-captioning-large": {
- # "model": BlipForConditionalGeneration.from_pretrained(f"Salesforce/blip-image-captioning-large"),
- # "processor": BlipProcessor.from_pretrained(f"Salesforce/blip-image-captioning-large"),
- # "device": "cuda:0"
- # },
- "damo-vilab/text-to-video-ms-1.7b": {
- "model": DiffusionPipeline.from_pretrained(f"{local_models}damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"),
- "device": "cuda:0"
- },
- # "facebook/maskformer-swin-large-ade": {
- # "model": MaskFormerForInstanceSegmentation.from_pretrained(f"facebook/maskformer-swin-large-ade"),
- # "feature_extractor" : AutoFeatureExtractor.from_pretrained("facebook/maskformer-swin-large-ade"),
- # "device": "cuda:0"
- # },
- # "microsoft/trocr-base-printed": {
- # "processor": TrOCRProcessor.from_pretrained(f"microsoft/trocr-base-printed"),
- # "model": VisionEncoderDecoderModel.from_pretrained(f"microsoft/trocr-base-printed"),
- # "device": "cuda:0"
- # },
- # "microsoft/trocr-base-handwritten": {
- # "processor": TrOCRProcessor.from_pretrained(f"microsoft/trocr-base-handwritten"),
- # "model": VisionEncoderDecoderModel.from_pretrained(f"microsoft/trocr-base-handwritten"),
- # "device": "cuda:0"
- # },
- "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k": {
- "model": BaseModel.from_pretrained("JorisCos/DCCRNet_Libri1Mix_enhsingle_16k"),
- "device": "cuda:0"
- },
-
- # "CompVis/stable-diffusion-v1-4": {
- # "model": DiffusionPipeline.from_pretrained(f"CompVis/stable-diffusion-v1-4"),
- # "device": "cuda:0"
- # },
- # "stabilityai/stable-diffusion-2-1": {
- # "model": DiffusionPipeline.from_pretrained(f"stabilityai/stable-diffusion-2-1"),
- # "device": "cuda:0"
- # },
-
- # "microsoft/speecht5_tts":{
- # "processor": SpeechT5Processor.from_pretrained(f"microsoft/speecht5_tts"),
- # "model": SpeechT5ForTextToSpeech.from_pretrained(f"microsoft/speecht5_tts"),
- # "vocoder": SpeechT5HifiGan.from_pretrained(f"microsoft/speecht5_hifigan"),
- # "embeddings_dataset": load_dataset(f"Matthijs/cmu-arctic-xvectors", split="validation"),
- # "device": "cuda:0"
- # },
- # "speechbrain/mtl-mimic-voicebank": {
- # "model": WaveformEnhancement.from_hparams(source="speechbrain/mtl-mimic-voicebank", savedir="models/mtl-mimic-voicebank"),
- # "device": "cuda:0"
- # },
- "microsoft/speecht5_vc":{
- "processor": SpeechT5Processor.from_pretrained(f"{local_models}microsoft/speecht5_vc"),
- "model": SpeechT5ForSpeechToSpeech.from_pretrained(f"{local_models}microsoft/speecht5_vc"),
- "vocoder": SpeechT5HifiGan.from_pretrained(f"{local_models}microsoft/speecht5_hifigan"),
- "embeddings_dataset": load_dataset(f"{local_models}Matthijs/cmu-arctic-xvectors", split="validation"),
- "device": "cuda:0"
- },
- # "julien-c/wine-quality": {
- # "model": joblib.load(cached_download(hf_hub_url("julien-c/wine-quality", "sklearn_model.joblib")))
- # },
- # "facebook/timesformer-base-finetuned-k400": {
- # "processor": AutoImageProcessor.from_pretrained(f"facebook/timesformer-base-finetuned-k400"),
- # "model": TimesformerForVideoClassification.from_pretrained(f"facebook/timesformer-base-finetuned-k400"),
- # "device": "cuda:0"
- # },
- "facebook/maskformer-swin-base-coco": {
- "feature_extractor": MaskFormerFeatureExtractor.from_pretrained(f"{local_models}facebook/maskformer-swin-base-coco"),
- "model": MaskFormerForInstanceSegmentation.from_pretrained(f"{local_models}facebook/maskformer-swin-base-coco"),
- "device": "cuda:0"
- },
- "Intel/dpt-hybrid-midas": {
- "model": DPTForDepthEstimation.from_pretrained(f"{local_models}Intel/dpt-hybrid-midas", low_cpu_mem_usage=True),
- "feature_extractor": DPTFeatureExtractor.from_pretrained(f"{local_models}Intel/dpt-hybrid-midas"),
- "device": "cuda:0"
- }
- }
-
- if local_deployment in ["full", "standard"]:
- standard_pipes = {
- # "nlpconnect/vit-gpt2-image-captioning":{
- # "model": VisionEncoderDecoderModel.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
- # "feature_extractor": ViTImageProcessor.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
- # "tokenizer": AutoTokenizer.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
- # "device": "cuda:0"
- # },
- "espnet/kan-bayashi_ljspeech_vits": {
- "model": Text2Speech.from_pretrained("espnet/kan-bayashi_ljspeech_vits"),
- "device": "cuda:0"
- },
- # "lambdalabs/sd-image-variations-diffusers": {
- # "model": DiffusionPipeline.from_pretrained(f"{local_models}lambdalabs/sd-image-variations-diffusers"), #torch_dtype=torch.float16
- # "device": "cuda:0"
- # },
- "runwayml/stable-diffusion-v1-5": {
- "model": DiffusionPipeline.from_pretrained(f"{local_models}runwayml/stable-diffusion-v1-5"),
- "device": "cuda:0"
- },
- # "superb/wav2vec2-base-superb-ks": {
- # "model": pipeline(task="audio-classification", model=f"superb/wav2vec2-base-superb-ks"),
- # "device": "cuda:0"
- # },
- "openai/whisper-base": {
- "model": pipeline(task="automatic-speech-recognition", model=f"{local_models}openai/whisper-base"),
- "device": "cuda:0"
- },
- # "microsoft/speecht5_asr": {
- # "model": pipeline(task="automatic-speech-recognition", model=f"{local_models}microsoft/speecht5_asr"),
- # "device": "cuda:0"
- # },
- "Intel/dpt-large": {
- "model": pipeline(task="depth-estimation", model=f"{local_models}Intel/dpt-large"),
- "device": "cuda:0"
- },
- # "microsoft/beit-base-patch16-224-pt22k-ft22k": {
- # "model": pipeline(task="image-classification", model=f"microsoft/beit-base-patch16-224-pt22k-ft22k"),
- # "device": "cuda:0"
- # },
- "facebook/detr-resnet-50-panoptic": {
- "model": pipeline(task="image-segmentation", model=f"{local_models}facebook/detr-resnet-50-panoptic"),
- "device": "cuda:0"
- },
- "facebook/detr-resnet-101": {
- "model": pipeline(task="object-detection", model=f"{local_models}facebook/detr-resnet-101"),
- "device": "cuda:0"
- },
- # "openai/clip-vit-large-patch14": {
- # "model": pipeline(task="zero-shot-image-classification", model=f"openai/clip-vit-large-patch14"),
- # "device": "cuda:0"
- # },
- # "google/owlvit-base-patch32": {
- # "model": pipeline(task="zero-shot-object-detection", model=f"{local_models}google/owlvit-base-patch32"),
- # "device": "cuda:0"
- # },
- # "microsoft/DialoGPT-medium": {
- # "model": pipeline(task="conversational", model=f"microsoft/DialoGPT-medium"),
- # "device": "cuda:0"
- # },
- # "bert-base-uncased": {
- # "model": pipeline(task="fill-mask", model=f"bert-base-uncased"),
- # "device": "cuda:0"
- # },
- # "deepset/roberta-base-squad2": {
- # "model": pipeline(task = "question-answering", model=f"deepset/roberta-base-squad2"),
- # "device": "cuda:0"
- # },
- # "facebook/bart-large-cnn": {
- # "model": pipeline(task="summarization", model=f"facebook/bart-large-cnn"),
- # "device": "cuda:0"
- # },
- # "google/tapas-base-finetuned-wtq": {
- # "model": pipeline(task="table-question-answering", model=f"google/tapas-base-finetuned-wtq"),
- # "device": "cuda:0"
- # },
- # "distilbert-base-uncased-finetuned-sst-2-english": {
- # "model": pipeline(task="text-classification", model=f"distilbert-base-uncased-finetuned-sst-2-english"),
- # "device": "cuda:0"
- # },
- # "gpt2": {
- # "model": pipeline(task="text-generation", model="gpt2"),
- # "device": "cuda:0"
- # },
- # "mrm8488/t5-base-finetuned-question-generation-ap": {
- # "model": pipeline(task="text2text-generation", model=f"mrm8488/t5-base-finetuned-question-generation-ap"),
- # "device": "cuda:0"
- # },
- # "Jean-Baptiste/camembert-ner": {
- # "model": pipeline(task="token-classification", model=f"Jean-Baptiste/camembert-ner", aggregation_strategy="simple"),
- # "device": "cuda:0"
- # },
- # "t5-base": {
- # "model": pipeline(task="translation", model=f"t5-base"),
- # "device": "cuda:0"
- # },
- "impira/layoutlm-document-qa": {
- "model": pipeline(task="document-question-answering", model=f"{local_models}impira/layoutlm-document-qa"),
- "device": "cuda:0"
- },
- "ydshieh/vit-gpt2-coco-en": {
- "model": pipeline(task="image-to-text", model=f"{local_models}ydshieh/vit-gpt2-coco-en"),
- "device": "cuda:0"
- },
- "dandelin/vilt-b32-finetuned-vqa": {
- "model": pipeline(task="visual-question-answering", model=f"{local_models}dandelin/vilt-b32-finetuned-vqa"),
- "device": "cuda:0"
- }
- }
-
- if local_deployment in ["full", "standard", "minimal"]:
-
- controlnet = ControlNetModel.from_pretrained(f"{local_models}lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
- controlnetpipe = StableDiffusionControlNetPipeline.from_pretrained(
- f"{local_models}runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
- )
-
-
- hed_network = HEDdetector.from_pretrained('lllyasviel/ControlNet')
-
- controlnet_sd_pipes = {
- "openpose-control": {
- "model": OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
- },
- "mlsd-control": {
- "model": MLSDdetector.from_pretrained('lllyasviel/ControlNet')
- },
- "hed-control": {
- "model": hed_network
- },
- "scribble-control": {
- "model": hed_network
- },
- "midas-control": {
- "model": MidasDetector.from_pretrained('lllyasviel/ControlNet')
- },
- "canny-control": {
- "model": CannyDetector()
- },
- "lllyasviel/sd-controlnet-canny":{
- "control": controlnet,
- "model": controlnetpipe,
- "device": "cuda:0"
- },
- "lllyasviel/sd-controlnet-depth":{
- "control": ControlNetModel.from_pretrained(f"{local_models}lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16),
- "model": controlnetpipe,
- "device": "cuda:0"
- },
- "lllyasviel/sd-controlnet-hed":{
- "control": ControlNetModel.from_pretrained(f"{local_models}lllyasviel/sd-controlnet-hed", torch_dtype=torch.float16),
- "model": controlnetpipe,
- "device": "cuda:0"
- },
- "lllyasviel/sd-controlnet-mlsd":{
- "control": ControlNetModel.from_pretrained(f"{local_models}lllyasviel/sd-controlnet-mlsd", torch_dtype=torch.float16),
- "model": controlnetpipe,
- "device": "cuda:0"
- },
- "lllyasviel/sd-controlnet-openpose":{
- "control": ControlNetModel.from_pretrained(f"{local_models}lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16),
- "model": controlnetpipe,
- "device": "cuda:0"
- },
- "lllyasviel/sd-controlnet-scribble":{
- "control": ControlNetModel.from_pretrained(f"{local_models}lllyasviel/sd-controlnet-scribble", torch_dtype=torch.float16),
- "model": controlnetpipe,
- "device": "cuda:0"
- },
- "lllyasviel/sd-controlnet-seg":{
- "control": ControlNetModel.from_pretrained(f"{local_models}lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16),
- "model": controlnetpipe,
- "device": "cuda:0"
- }
- }
- pipes = {**standard_pipes, **other_pipes, **controlnet_sd_pipes}
- return pipes
-
-pipes = load_pipes(local_deployment)
-
-end = time.time()
-during = end - start
-
-print(f"[ ready ] {during}s")
-
-def running():
- return {"running": True}
-
-def status(model_id):
- disabled_models = ["microsoft/trocr-base-printed", "microsoft/trocr-base-handwritten"]
- if model_id in pipes.keys() and model_id not in disabled_models:
- print(f"[ check {model_id} ] success")
- return {"loaded": True}
- else:
- print(f"[ check {model_id} ] failed")
- return {"loaded": False}
-
-def models(model_id, data):
- while "using" in pipes[model_id] and pipes[model_id]["using"]:
- print(f"[ inference {model_id} ] waiting")
- time.sleep(0.1)
- pipes[model_id]["using"] = True
- print(f"[ inference {model_id} ] start")
-
- start = time.time()
-
- pipe = pipes[model_id]["model"]
-
- if "device" in pipes[model_id]:
- try:
- pipe.to(pipes[model_id]["device"])
- except:
- pipe.device = torch.device(pipes[model_id]["device"])
- pipe.model.to(pipes[model_id]["device"])
-
- result = None
- try:
- # text to video
- if model_id == "damo-vilab/text-to-video-ms-1.7b":
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
- # pipe.enable_model_cpu_offload()
- prompt = data["text"]
- video_frames = pipe(prompt, num_inference_steps=50, num_frames=40).frames
- file_name = str(uuid.uuid4())[:4]
- video_path = export_to_video(video_frames, f"public/videos/{file_name}.mp4")
-
- new_file_name = str(uuid.uuid4())[:4]
- os.system(f"ffmpeg -i {video_path} -vcodec libx264 public/videos/{new_file_name}.mp4")
-
- if os.path.exists(f"public/videos/{new_file_name}.mp4"):
- result = {"path": f"/videos/{new_file_name}.mp4"}
- else:
- result = {"path": f"/videos/{file_name}.mp4"}
-
- # controlnet
- if model_id.startswith("lllyasviel/sd-controlnet-"):
- pipe.controlnet.to('cpu')
- pipe.controlnet = pipes[model_id]["control"].to(pipes[model_id]["device"])
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
- control_image = load_image(data["img_url"])
- # generator = torch.manual_seed(66)
- out_image: Image = pipe(data["text"], num_inference_steps=20, image=control_image).images[0]
- file_name = str(uuid.uuid4())[:4]
- out_image.save(f"public/images/{file_name}.png")
- result = {"path": f"/images/{file_name}.png"}
-
- if model_id.endswith("-control"):
- image = load_image(data["img_url"])
- if "scribble" in model_id:
- control = pipe(image, scribble = True)
- elif "canny" in model_id:
- control = pipe(image, low_threshold=100, high_threshold=200)
- else:
- control = pipe(image)
- file_name = str(uuid.uuid4())[:4]
- control.save(f"public/images/{file_name}.png")
- result = {"path": f"/images/{file_name}.png"}
-
- # image to image
- if model_id == "lambdalabs/sd-image-variations-diffusers":
- im = load_image(data["img_url"])
- file_name = str(uuid.uuid4())[:4]
- with open(f"public/images/{file_name}.png", "wb") as f:
- f.write(data)
- tform = transforms.Compose([
- transforms.ToTensor(),
- transforms.Resize(
- (224, 224),
- interpolation=transforms.InterpolationMode.BICUBIC,
- antialias=False,
- ),
- transforms.Normalize(
- [0.48145466, 0.4578275, 0.40821073],
- [0.26862954, 0.26130258, 0.27577711]),
- ])
- inp = tform(im).to(pipes[model_id]["device"]).unsqueeze(0)
- out = pipe(inp, guidance_scale=3)
- out["images"][0].save(f"public/images/{file_name}.jpg")
- result = {"path": f"/images/{file_name}.jpg"}
-
- # image to text
- if model_id == "Salesforce/blip-image-captioning-large":
- raw_image = load_image(data["img_url"]).convert('RGB')
- text = data["text"]
- inputs = pipes[model_id]["processor"](raw_image, return_tensors="pt").to(pipes[model_id]["device"])
- out = pipe.generate(**inputs)
- caption = pipes[model_id]["processor"].decode(out[0], skip_special_tokens=True)
- result = {"generated text": caption}
- if model_id == "ydshieh/vit-gpt2-coco-en":
- img_url = data["img_url"]
- generated_text = pipe(img_url)[0]['generated_text']
- result = {"generated text": generated_text}
- if model_id == "nlpconnect/vit-gpt2-image-captioning":
- image = load_image(data["img_url"]).convert("RGB")
- pixel_values = pipes[model_id]["feature_extractor"](images=image, return_tensors="pt").pixel_values
- pixel_values = pixel_values.to(pipes[model_id]["device"])
- generated_ids = pipe.generate(pixel_values, **{"max_length": 200, "num_beams": 1})
- generated_text = pipes[model_id]["tokenizer"].batch_decode(generated_ids, skip_special_tokens=True)[0]
- result = {"generated text": generated_text}
- # image to text: OCR
- if model_id == "microsoft/trocr-base-printed" or model_id == "microsoft/trocr-base-handwritten":
- image = load_image(data["img_url"]).convert("RGB")
- pixel_values = pipes[model_id]["processor"](image, return_tensors="pt").pixel_values
- pixel_values = pixel_values.to(pipes[model_id]["device"])
- generated_ids = pipe.generate(pixel_values)
- generated_text = pipes[model_id]["processor"].batch_decode(generated_ids, skip_special_tokens=True)[0]
- result = {"generated text": generated_text}
-
- # text to image
- if model_id == "runwayml/stable-diffusion-v1-5":
- file_name = str(uuid.uuid4())[:4]
- text = data["text"]
- out = pipe(prompt=text)
- out["images"][0].save(f"public/images/{file_name}.jpg")
- result = {"path": f"/images/{file_name}.jpg"}
-
- # object detection
- if model_id == "google/owlvit-base-patch32" or model_id == "facebook/detr-resnet-101":
- img_url = data["img_url"]
- open_types = ["cat", "couch", "person", "car", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird"]
- result = pipe(img_url, candidate_labels=open_types)
-
- # VQA
- if model_id == "dandelin/vilt-b32-finetuned-vqa":
- question = data["text"]
- img_url = data["img_url"]
- result = pipe(question=question, image=img_url)
-
- #DQA
- if model_id == "impira/layoutlm-document-qa":
- question = data["text"]
- img_url = data["img_url"]
- result = pipe(img_url, question)
-
- # depth-estimation
- if model_id == "Intel/dpt-large":
- output = pipe(data["img_url"])
- image = output['depth']
- name = str(uuid.uuid4())[:4]
- image.save(f"public/images/{name}.jpg")
- result = {"path": f"/images/{name}.jpg"}
-
- if model_id == "Intel/dpt-hybrid-midas" and model_id == "Intel/dpt-large":
- image = load_image(data["img_url"])
- inputs = pipes[model_id]["feature_extractor"](images=image, return_tensors="pt")
- with torch.no_grad():
- outputs = pipe(**inputs)
- predicted_depth = outputs.predicted_depth
- prediction = torch.nn.functional.interpolate(
- predicted_depth.unsqueeze(1),
- size=image.size[::-1],
- mode="bicubic",
- align_corners=False,
- )
- output = prediction.squeeze().cpu().numpy()
- formatted = (output * 255 / np.max(output)).astype("uint8")
- image = Image.fromarray(formatted)
- name = str(uuid.uuid4())[:4]
- image.save(f"public/images/{name}.jpg")
- result = {"path": f"/images/{name}.jpg"}
-
- # TTS
- if model_id == "espnet/kan-bayashi_ljspeech_vits":
- text = data["text"]
- wav = pipe(text)["wav"]
- name = str(uuid.uuid4())[:4]
- sf.write(f"public/audios/{name}.wav", wav.cpu().numpy(), pipe.fs, "PCM_16")
- result = {"path": f"/audios/{name}.wav"}
-
- if model_id == "microsoft/speecht5_tts":
- text = data["text"]
- inputs = pipes[model_id]["processor"](text=text, return_tensors="pt")
- embeddings_dataset = pipes[model_id]["embeddings_dataset"]
- speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0).to(pipes[model_id]["device"])
- pipes[model_id]["vocoder"].to(pipes[model_id]["device"])
- speech = pipe.generate_speech(inputs["input_ids"].to(pipes[model_id]["device"]), speaker_embeddings, vocoder=pipes[model_id]["vocoder"])
- name = str(uuid.uuid4())[:4]
- sf.write(f"public/audios/{name}.wav", speech.cpu().numpy(), samplerate=16000)
- result = {"path": f"/audios/{name}.wav"}
-
- # ASR
- if model_id == "openai/whisper-base" or model_id == "microsoft/speecht5_asr":
- audio_url = data["audio_url"]
- result = { "text": pipe(audio_url)["text"]}
-
- # audio to audio
- if model_id == "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k":
- audio_url = data["audio_url"]
- wav, sr = torchaudio.load(audio_url)
- with torch.no_grad():
- result_wav = pipe(wav.to(pipes[model_id]["device"]))
- name = str(uuid.uuid4())[:4]
- sf.write(f"public/audios/{name}.wav", result_wav.cpu().squeeze().numpy(), sr)
- result = {"path": f"/audios/{name}.wav"}
-
- if model_id == "microsoft/speecht5_vc":
- audio_url = data["audio_url"]
- wav, sr = torchaudio.load(audio_url)
- inputs = pipes[model_id]["processor"](audio=wav, sampling_rate=sr, return_tensors="pt")
- embeddings_dataset = pipes[model_id]["embeddings_dataset"]
- speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
- pipes[model_id]["vocoder"].to(pipes[model_id]["device"])
- speech = pipe.generate_speech(inputs["input_ids"].to(pipes[model_id]["device"]), speaker_embeddings, vocoder=pipes[model_id]["vocoder"])
- name = str(uuid.uuid4())[:4]
- sf.write(f"public/audios/{name}.wav", speech.cpu().numpy(), samplerate=16000)
- result = {"path": f"/audios/{name}.wav"}
-
- # segmentation
- if model_id == "facebook/detr-resnet-50-panoptic":
- result = []
- segments = pipe(data["img_url"])
- image = load_image(data["img_url"])
-
- colors = []
- for i in range(len(segments)):
- colors.append((random.randint(100, 255), random.randint(100, 255), random.randint(100, 255), 50))
-
- for i, segment in enumerate(segments):  # give every segment its own overlay colour
- mask = segment["mask"]
- mask = mask.convert('L')
- layer = Image.new('RGBA', mask.size, colors[i])
- image.paste(layer, (0, 0), mask)
- name = str(uuid.uuid4())[:4]
- image.save(f"public/images/{name}.jpg")
- result = {"path": f"/images/{name}.jpg"}
-
- if model_id == "facebook/maskformer-swin-base-coco" or model_id == "facebook/maskformer-swin-large-ade":
- image = load_image(data["img_url"])
- inputs = pipes[model_id]["feature_extractor"](images=image, return_tensors="pt").to(pipes[model_id]["device"])
- outputs = pipe(**inputs)
- result = pipes[model_id]["feature_extractor"].post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
- predicted_panoptic_map = result["segmentation"].cpu().numpy()
- predicted_panoptic_map = Image.fromarray(predicted_panoptic_map.astype(np.uint8))
- name = str(uuid.uuid4())[:4]
- predicted_panoptic_map.save(f"public/images/{name}.jpg")
- result = {"path": f"/images/{name}.jpg"}
-
- except Exception as e:
- print(e)
- traceback.print_exc()
- result = {"error": {"message": "Error when running the model inference."}}
-
- if "device" in pipes[model_id]:
- try:
- pipe.to("cpu")
- torch.cuda.empty_cache()
- except:
- pipe.device = torch.device("cpu")
- pipe.model.to("cpu")
- torch.cuda.empty_cache()
-
- pipes[model_id]["using"] = False
-
- if result is None:
- result = {"error": {"message": "model not found"}}
-
- end = time.time()
- during = end - start
- print(f"[ complete {model_id} ] {during}s")
- print(f"[ result {model_id} ] {result}")
-
- return result
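-
-# Hedged usage sketch (hypothetical file name): these functions are called by the HuggingGPT
-# server layer; `status` reports whether a pipeline is loaded and `models` runs the inference.
-#   if status("openai/whisper-base")["loaded"]:
-#       print(models("openai/whisper-base", {"audio_url": "public/audios/demo.wav"}))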
diff --git a/spaces/cccc-c/bingo/src/app/layout.tsx b/spaces/cccc-c/bingo/src/app/layout.tsx
deleted file mode 100644
index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000
--- a/spaces/cccc-c/bingo/src/app/layout.tsx
+++ /dev/null
@@ -1,47 +0,0 @@
-import { Metadata } from 'next'
-import { Toaster } from 'react-hot-toast'
-import { TailwindIndicator } from '@/components/tailwind-indicator'
-import { Providers } from '@/components/providers'
-import { Header } from '@/components/header'
-
-import '@/app/globals.scss'
-
-
-export const metadata: Metadata = {
- title: {
- default: 'Bing AI Chatbot',
- template: `%s - Bing AI Chatbot`
- },
- description: 'Bing AI Chatbot Web App.',
- themeColor: [
- { media: '(prefers-color-scheme: light)', color: 'white' },
- { media: '(prefers-color-scheme: dark)', color: 'dark' }
- ],
- icons: {
- icon: '/favicon.ico',
- shortcut: '../assets/images/logo.svg',
- apple: '../assets/images/logo.svg'
- }
-}
-
-interface RootLayoutProps {
- children: React.ReactNode
-}
-
-export default function RootLayout({ children }: RootLayoutProps) {
- return (
-
-
-
-
-