diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/app.py b/spaces/1acneusushi/gradio-2dmoleculeeditor/app.py deleted file mode 100644 index 5992a7b7ec434e09187510098ecdefad5b81b65e..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/app.py +++ /dev/null @@ -1,102 +0,0 @@ -import gradio as gr - -viewer_html = """ -
-

loading SMILES editor

- - - - - - - - - - - -
-
-""" - - -load_js = """ -async () => { -var loadingDiv = document.getElementById('loading'); - loadingDiv.style.display = 'flex'; - -//load css - let url = "https://huggingface.co/datasets/simonduerr/ketcher-2.7.2/raw/main/static/css/main.6a646761.css" -fetch(url) - .then(res => res.text()) - .then(text => { - const style = document.createElement('style'); - style.textContent = text - document.head.appendChild(style); - - }); -//load ketcher -url = "https://huggingface.co/datasets/simonduerr/ketcher-2.7.2/resolve/main/static/js/main.5445f351.js" -fetch(url) - .then(res => res.text()) - .then(text => { - const script = document.createElement('script'); - //script.type = "module" - script.src = URL.createObjectURL(new Blob([text], { type: 'application/javascript' })); - document.head.appendChild(script); - loadingDiv.style.display = 'none'; - }); - - -} -""" - -# add your logic here, hidden_state contains the SMILES string returned from Editor -def run(hidden_state): - return f"{hidden_state}" - -get_js = """ -async () => { - return ketcher.getSmiles().then(function(smiFile){return smiFile}) - } -""" - - - -with gr.Blocks() as blocks: - gr.Markdown(""" - # Gradio Molecule entry with Ketcher - """) - html = gr.HTML(viewer_html) - #do not change this part - hidden_state = gr.Textbox(visible=False) - # we need a hidden textbox that can be used to first trigger the JS callback - # and then onchange of the textbox, we can run the python function - out = gr.Textbox("", label="SMILES") - btn = gr.Button("Get SMILES") - # trigger JS callback and written to hidden textbox - btn.click(fn=None, - inputs=[], - outputs=[hidden_state], - _js=get_js) - # run python function on change of hidden textbox, add your logic to run function - hidden_state.change(fn=run, inputs=[hidden_state], outputs=[out]) - # load JS on load of the page - blocks.load(None, None, None, _js=load_js) - -blocks.launch() diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chandramukhi Tamil Movie Free Download Dont Miss this Thrilling and Hilarious Film Featuring Rajinikanth Jyothika and Nayanthara.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chandramukhi Tamil Movie Free Download Dont Miss this Thrilling and Hilarious Film Featuring Rajinikanth Jyothika and Nayanthara.md deleted file mode 100644 index cdeab75cb9e8739744c695f77ae0e9e681dbdeab..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Chandramukhi Tamil Movie Free Download Dont Miss this Thrilling and Hilarious Film Featuring Rajinikanth Jyothika and Nayanthara.md +++ /dev/null @@ -1,125 +0,0 @@ - -

Chandramukhi Tamil Movie Free Download: A Guide for Movie Lovers

-

If you are a fan of Tamil movies, you must have heard of Chandramukhi, one of the most successful and acclaimed movies in Tamil cinema history. Released in 2005, Chandramukhi is a comedy horror film that stars Rajinikanth, Jyothika, Prabhu, Nayanthara, and Vadivelu in the lead roles. Directed by P. Vasu, Chandramukhi is a remake of the Malayalam film Manichitrathazhu (1993), which was also remade in several other languages. Chandramukhi tells the story of a psychiatrist who tries to cure a woman who suffers from a split personality disorder that is linked to a haunted mansion. The movie is a perfect blend of humor, suspense, romance, and drama, with stunning performances, music, and visuals.

-

Chandramukhi Tamil Movie Free Download


DOWNLOADhttps://byltly.com/2uKuZg



-

In this article, we will tell you everything you need to know about Chandramukhi Tamil movie free download. We will give you a brief overview of the plot, the cast and crew, the music and songs, the awards and accolades, the remakes and sequels, and the reasons to watch this movie. We will also warn you about the challenges and risks of watching this movie online for free, and suggest some legal and safe ways to enjoy this movie. Finally, we will offer some alternatives to watch if you like Chandramukhi. So, without further ado, let's get started!

-

The Plot of Chandramukhi

-

The plot of Chandramukhi revolves around Saravanan (Rajinikanth), a psychiatrist who visits his friend Senthilnathan (Prabhu) and his wife Ganga (Jyothika) at their ancestral mansion. Senthil's mother Kasthuri (Sheela) wanted him to marry Priya (Malavika), the daughter of his father's cousin Kandaswamy (Nassar), but he chose Ganga instead. Saravanan learns that Senthil bought the mansion despite being warned by the villagers that it is haunted by the ghost of Chandramukhi (Nayanthara), a dancer who was killed by her lover Vettaiyan (also Rajinikanth), a king who was obsessed with her.

-

Saravanan soon notices that Ganga behaves strangely at times, especially when she hears any music or sees any paintings related to Chandramukhi. He realizes that Ganga is possessed by Chandramukhi's spirit, who wants to take revenge on Vettaiyan's descendants. He decides to cure Ganga by using his psychological methods, while also protecting her from Akhilandeshwari (K.R. Vijaya), Kandaswamy's sister who hates Saravanan and wants to kill him with the help of her assistant Oomaiyan (Vadivelu). Saravanan also helps Priya and Vishwanathan (Vineeth), a dance professor who love each other, to get married with Kandaswamy's consent.

-

The climax of the movie reveals that Vettaiyan was not Chandramukhi's lover, but her savior who rescued her from her abusive husband Raja Rajeshwari's brother. He also reveals that he did not kill Chandramukhi, but she committed suicide after seeing him beheaded by Raja Rajeshwari's men. He then took her body to his palace and locked it in a room where he died with her. Saravanan manages to convince Ganga that she is not Chandramukhi, but his friend's wife who loves him dearly. He also performs a ritual to free Chandramukhi's soul from her earthly bondage. The movie ends with Saravanan and Senthil's families living happily ever after.

-

The Cast and Crew of Chandramukhi

-

The cast and crew of Chandramukhi are as follows:

-

Chandramukhi Tamil full movie download HD
-Chandramukhi Tamil movie free online watch
-Chandramukhi Tamil movie download 720p
-Chandramukhi Tamil movie download in Isaimini
-Chandramukhi Tamil movie download with English subtitles
-Chandramukhi Tamil movie free download Tamilrockers
-Chandramukhi Tamil movie download in Kuttymovies
-Chandramukhi Tamil movie free download in Telegram
-Chandramukhi Tamil movie download in Moviesda
-Chandramukhi Tamil movie free download in Tamilyogi
-Chandramukhi Tamil movie download in Filmywap
-Chandramukhi Tamil movie free download in Movierulz
-Chandramukhi Tamil movie download in Jio Rockers
-Chandramukhi Tamil movie free download in Madras Rockers
-Chandramukhi Tamil movie download in Filmyzilla
-Chandramukhi Tamil movie free download in Todaypk
-Chandramukhi Tamil movie download in Bolly4u
-Chandramukhi Tamil movie free download in 9xmovies
-Chandramukhi Tamil movie download in Worldfree4u
-Chandramukhi Tamil movie free download in 123movies
-Chandramukhi Tamil movie download in Khatrimaza
-Chandramukhi Tamil movie free download in Pagalworld
-Chandramukhi Tamil movie download in SkymoviesHD
-Chandramukhi Tamil movie free download in Mp4moviez
-Chandramukhi Tamil movie download in Sdmoviespoint
-Chandramukhi Tamil movie free download in Rdxhd
-Chandramukhi Tamil movie download in 7starhd
-Chandramukhi Tamil movie free download in Katmoviehd
-Chandramukhi Tamil movie download in Coolmoviez
-Chandramukhi Tamil movie free download in Moviesflix
-Chandramukhi Tamil movie download in Cinemavilla
-Chandramukhi Tamil movie free download in Mallumv
-Chandramukhi Tamil movie download in Klwap
-Chandramukhi Tamil movie free download in Dvdplay
-Chandramukhi Tamil movie download in A2movies
-Chandramukhi Tamil movie free download in Tamilmv
-Chandramukhi Tamil movie download Rajinikanth version
-Chandramukhi Tamil movie free download Prabhu version
-Chandramukhi Tamil movie songs free download mp3
-Chandramukhi Tamil full hd video songs free download
-How to watch or stream chandramukhi tamil full hd online for free?
-Where can I find chandramukhi tamil full hd torrent link?
-Is it legal to watch or download chandramukhi tamil full hd for free?
-What are the best alternatives to chandramukhi tamil full hd?
-How to get chandramukhi tamil full hd subtitles for free?
-What are the reviews and ratings of chandramukhi tamil full hd?
-Who are the cast and crew of chandramukhi tamil full hd?
-What is the plot and genre of chandramukhi tamil full hd?
-How to get chandramukhi tamil full hd poster and wallpaper for free?
-How to watch or download chandramukhi tamil full hd with VPN?

| Role | Actor/Actress |
| --- | --- |
| Saravanan/Vettaiyan | Rajinikanth |
| Ganga/Chandramukhi | Jyothika |
| Senthilnathan | Prabhu |
| Durga | Nayanthara |
| Oomaiyan | Vadivelu |
| Priya | Malavika |
| Vishwanathan | Vineeth |
| Kandaswamy | Nassar |
| Akhilandeshwari | K.R. Vijaya |
| Kasthuri | Sheela |
| Raja Rajeshwari's brother | Sonu Sood |

Director: P. Vasu

The Music and Songs of Chandramukhi

-

The music and songs of Chandramukhi were composed by Vidyasagar, who won several awards for his work. The lyrics were written by Vaali, except for one song which was written by Yugabharathi. The singers included S.P. Balasubrahmanyam, K.S. Chithra, Karthik, Tippu, Manikka Vinayagam, Madhu Balakrishnan, Anuradha Sriram, Harini, Prasanna Rao, Binny Krishnakumar, Rajalakshmi, Kalpana Raghavendar, Mahathi Swara Sagar and Vidyasagar himself.

-

The soundtrack album consists of six songs:

-
    -
1. Kokku Para Para: A peppy song sung by Tippu, Manikka Vinayagam and Prasanna Rao that introduces Saravanan's character.
2. Raa Raa: A haunting song sung by Binny Krishnakumar and Tippu that describes Chandramukhi's story.
3. Konja Neram: A romantic song sung by Asha Bhosle and Madhu Balakrishnan that features Priya and Vishwanathan.
4. Athinthom: A motivational song sung by S.P. Balasubrahmanyam that encourages Saravanan to face his challenges.
5. Devuda Devuda: A humorous song sung by S.P. Balasubrahmanyam and Vidyasagar that mocks Oomaiyan's antics.
6. Annanoda Pattu: A festive song sung by K.S. Chithra and Rajalakshmi that celebrates Senthilnathan's birthday.

    The Awards and Accolades of

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Advanced Installer License V 16.4.1 Patch.md b/spaces/1gistliPinn/ChatGPT4/Examples/Advanced Installer License V 16.4.1 Patch.md deleted file mode 100644 index 8cab721f2e46aadf2a16a7ae79a5d605ecca0cc0..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Advanced Installer License V 16.4.1 Patch.md +++ /dev/null @@ -1,18 +0,0 @@ -

    Advanced installer license v 16.4.1 patch


    Download Ziphttps://imgfil.com/2uxZse



    -
    -119.0. Advanced Installer.... s8.msi) to configure Advanced Installer for debugging. Installer. Advanced Installer 5.1.2.154.0. Automatic generation of debug logs when the deployment starts or completes.. Welcome to the Advanced Installer website. This site contains a reference guide to all the components of Advanced Installer. - -Advanced Installer v5.1 Documentation. From: Advanced Installer Advanced Installer Component Library This documentation is available for you to read. It contains the following files. Advanced Installer Documentation Introduction to Advanced Installer Help Advanced Installer API Advanced Installer UI Advanced Installer Action Reference Advanced Installer Scripting Guide Advanced Installer Workflow Reference Advanced Installer Windows. - -Advanced Installer Documentation. Welcome to the Advanced Installer website. This site contains a reference guide to all the components of Advanced Installer. - -Advanced Installer v5.1.0 Documentation. From: Advanced Installer Advanced Installer Component Library This documentation is available for you to read. It contains the following files. Advanced Installer Documentation Introduction to Advanced Installer Help Advanced Installer API Advanced Installer UI Advanced Installer Action Reference Advanced Installer Scripting Guide Advanced Installer Workflow Reference Advanced Installer Windows. - -The Support Documentation for Advanced Installer is the official source for support information on the Advanced Installer product and any of its components. It contains the following files. - -Advanced Installer v5.1.0.155.0. Manual. From: Advanced Installer Advanced Installer Component Library This documentation is available for you to read. It contains the following files. Advanced Installer Documentation Introduction to Advanced Installer Help Advanced Installer API Advanced Installer UI Advanced Installer Action Reference Advanced Installer Scripting Guide Advanced Installer Workflow Reference Advanced Installer Windows. - -Advanced Installer v5.1.0.155.0. This page lists the documentation for all the components of Advanced Installer. You will find descriptions and references on the use of each component. To access the documentation you need to right click on the component you are interested 4fefd39f24
    -
    -
    -

    diff --git a/spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer on PC Open-World Multiplayer Mode Car Tuning and More.md b/spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer on PC Open-World Multiplayer Mode Car Tuning and More.md deleted file mode 100644 index b4b8d9eb212470cd2ebb57aa982226a470635e00..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Car Parking Multiplayer on PC Open-World Multiplayer Mode Car Tuning and More.md +++ /dev/null @@ -1,113 +0,0 @@ - -

    Car Parking Multiplayer APK PC: A Complete Guide

    -

    Do you love driving and parking games? Do you want to experience a realistic and immersive simulation of car parking on your computer or laptop? If yes, then you should try car parking multiplayer apk pc, a popular game that lets you park your car in various scenarios, customize your vehicle, and interact with other players online.

    -

    In this article, we will tell you everything you need to know about car parking multiplayer apk pc, including what it is, why it is popular, how to download and install it, how to play it, and some tips and tricks to improve your skills and enjoy the game. Let's get started!

    -

    car parking multiplayer apk pc


    Download File ○○○ https://jinyurl.com/2uNNFN



    -

    What is car parking multiplayer apk pc?

    -

    Car parking multiplayer apk pc is a simulation game developed by olzhass, a Turkish game studio. It is available for Android devices, but you can also play it on your computer or laptop using an emulator software. The game has more than 50 million downloads on Google Play Store and has a rating of 4.4 out of 5 stars.

    -

    The game is more than just parking: it has an open-world multiplayer mode, where you can roam freely in the environment, customize your car, and compete in races against other players. You can also communicate with hundreds of thousands of other players worldwide every day. The game's setting is based on a realistic scenario of petrol stations and car services.

    -

    Car parking multiplayer apk pc has a wide variety of cars with realistic interiors and high-detailed landscapes. There are 16 unique character skins, and you can go inside buildings. You can also play as a police officer and chase criminals in a special police mode.

    -

    Car parking multiplayer apk pc is a fun and challenging game that tests your driving and parking skills in different situations. You can choose from different modes, such as easy, medium, hard, or expert, and complete various levels with different objectives. You can also create your own levels and share them with other players.

    -

    How to download and install car parking multiplayer apk pc on your computer or laptop

    -

    To play car parking multiplayer apk pc on your computer or laptop, you need to use an emulator software that will emulate an Android device on your Windows or Mac system. There are many emulators available online, but we will recommend two of them: BlueStacks and LDPlayer. Here are the steps to download and install car parking multiplayer apk pc using these emulators:

    -

    Using BlueStacks emulator

    -
      -
1. Download and install BlueStacks from here. The installation process is quite simple and straightforward.
2. After successful installation, open BlueStacks and sign in with your Google account to access the Play Store.
3. Look for car parking multiplayer in the search bar at the top right corner of the screen.
4. Click to install car parking multiplayer from the search results.
5. Once the installation is complete, click the car parking multiplayer icon on the home screen to start playing.
    -

    Using LDPlayer emulator

    -
      -
1. Download and install LDPlayer from here. The installation process is similar to that of the BlueStacks emulator.
2. After successful installation, open LDPlayer and sign in with your Google account to access the Play Store.
3. Look for car parking multiplayer in the search bar at the top of the screen.
4. Click to install car parking multiplayer from the search results.
5. Once the installation is complete, click the car parking multiplayer icon on the home screen to start playing.
    -

    Using other emulators

    -

    If you don't want to use BlueStacks or LDPlayer, you can also try other emulators such as NoxPlayer, MEmu, or Andy. The steps are similar to the ones above, except that you need to download and install the emulator of your choice from their respective websites. Then, you need to sign in with your Google account, search for car parking multiplayer in the Play Store, and install and play it as usual.

    -

    How to play car parking multiplayer apk pc on your computer or laptop

    -

    Once you have downloaded and installed car parking multiplayer apk pc on your computer or laptop using an emulator, you can start playing it by clicking the game icon on the home screen of the emulator. You will see a menu with different options, such as single player, multiplayer, settings, and more. You can choose the mode you want to play and customize your preferences accordingly. Here are some of the main features of the game that you can enjoy:

    -

    car parking multiplayer download for pc
    -car parking multiplayer pc game
    -car parking multiplayer windows 10
    -car parking multiplayer online on pc
    -car parking multiplayer simulator for pc
    -car parking multiplayer bluestacks
    -car parking multiplayer noxplayer
    -car parking multiplayer pc version
    -car parking multiplayer mod apk pc
    -car parking multiplayer free download pc
    -car parking multiplayer pc gameplay
    -car parking multiplayer windows 7
    -car parking multiplayer on pc with keyboard
    -car parking multiplayer emulator
    -car parking multiplayer pc controls
    -car parking multiplayer apk for laptop
    -car parking multiplayer pc requirements
    -car parking multiplayer windows 8
    -car parking multiplayer on pc without emulator
    -car parking multiplayer apk for mac
    -car parking multiplayer pc offline
    -car parking multiplayer windows xp
    -car parking multiplayer on pc with mouse
    -car parking multiplayer ldplayer
    -car parking multiplayer pc cheat codes
    -car parking multiplayer apk for desktop
    -car parking multiplayer pc online mode
    -car parking multiplayer windows vista
    -car parking multiplayer on pc with controller
    -car parking multiplayer memu
    -car parking multiplayer pc hack
    -car parking multiplayer apk for chromebook
    -car parking multiplayer pc update
    -car parking multiplayer windows 11
    -car parking multiplayer on pc with steering wheel
    -car parking multiplayer koplayer
    -car parking multiplayer pc tips and tricks
    -car parking multiplayer apk for linux
    -car parking multiplayer pc review
    -car parking multiplayer windows phone
    -car parking multiplayer on pc with friends
    -car parking multiplayer gameloop
    -car parking multiplayer pc settings
    -car parking multiplayer apk for ubuntu
    -car parking multiplayer pc system requirements
    -car parking multiplayer windows store
    -car parking multiplayer on pc with vr headset
    -car parking multiplayer droid4x

    -

    Multiplayer open world mode

    -

    This is the most exciting mode of the game, where you can join or create a room with other players online and explore the open world together. You can chat with other players, race with them, or just have fun driving around. You can also switch between different cars and characters in this mode. There are different maps to choose from, such as city, airport, desert, and more. You can also invite your friends to join your room and play with them.

    -

    Car tuning and customization

    -

    If you love to modify your car and make it look unique, you will love this feature of the game. You can tune and customize your car in various ways, such as changing the color, wheels, suspension, engine, exhaust, spoiler, and more. You can also add stickers and decals to your car to make it stand out. You can access the tuning and customization options by clicking the wrench icon on the top left corner of the screen.

    -

    Police mode and free walking

    -

    If you want to experience some action and thrill in the game, you can try the police mode and free walking features. In police mode, you can play as a police officer and chase criminals in your patrol car. You can use sirens, lights, and radio to communicate with other officers. You can also arrest criminals by bumping into their cars or using a stun gun. In free walking mode, you can get out of your car and walk around the environment. You can enter buildings, interact with objects, and even ride a bicycle.

    -

    Tips and tricks to improve your parking skills and enjoy the game

    -

    Car parking multiplayer apk pc is a game that requires both skill and strategy to master. It is not easy to park your car perfectly in different scenarios without hitting any obstacles or breaking any rules. However, with some practice and tips, you can improve your parking skills and enjoy the game more. Here are some tips and tricks that might help you:

    -

    Adjust the camera angle and view

    -

    One of the most important things to do in the game is to adjust the camera angle and view according to your preference and situation. You can switch between different camera views by clicking the camera icon on the top right corner of the screen. You can choose from first-person view, third-person view, top-down view, or rear-view mirror view. Each view has its own advantages and disadvantages depending on the level and objective. For example, first-person view gives you a realistic feeling of driving inside the car, but it might limit your visibility of the surroundings. Third-person view gives you a wider perspective of the car and the parking spot, but it might make it harder to judge the distance and angle. Top-down view gives you a clear view of the parking spot and the obstacles, but it might make it difficult to control the steering and speed. Rear-view mirror view gives you a realistic view of the rear of the car, but it might not show you the front or sides of the car. Therefore, you should experiment with different views and find the one that suits you best.

    -

    Use the brake and handbrake wisely

    -

    Another important thing to do in the game is to use the brake and handbrake wisely. You can control the brake and handbrake by clicking the pedals on the bottom right corner of the screen. The brake pedal helps you slow down or stop your car, while the handbrake pedal helps you lock your wheels and perform sharp turns or drifts. You should use the brake pedal when you want to reduce your speed gradually or stop your car smoothly. You should use the handbrake pedal when you want to make a quick turn or park your car in a tight spot. However, you should be careful not to overuse or misuse the brake and handbrake pedals, as they might cause your car to skid, spin, or crash.

    -

    Follow the rules and avoid collisions

    -

    One of the main challenges of the game is to follow the rules and avoid collisions while parking your car. You should pay attention to the traffic signs, signals, and markings on the road and follow them accordingly. You should also respect other cars and pedestrians on the road and avoid hitting them. If you break any rules or cause any collisions, you will lose points or fail the level. Therefore, you should drive carefully and responsibly in the game.

    -

    Explore the map and find hidden locations

    -

    One of the fun aspects of the game is to explore the map and find hidden locations. The game has a large and detailed map with various locations, such as city streets, parking lots, airports, deserts, and more. You can discover new places by driving around or using the map icon on the top left corner of the screen. You can also find hidden locations by following clues or hints on the road or in buildings. Some hidden locations might have special rewards or challenges for you to complete.

    -

    Conclusion

    -

    Car parking multiplayer apk pc is a great game for anyone who loves driving and parking games. It offers a realistic and immersive simulation of car parking on your computer or laptop, with an open-world multiplayer mode, car tuning and customization, police mode and free walking, and more. You can download and install car parking multiplayer apk pc on your computer or laptop using an emulator software such as BlueStacks or LDPlayer. You can also improve your parking skills and enjoy the game more by following some tips and tricks, such as adjusting the camera angle and view, using the brake and handbrake wisely, following the rules and avoiding collisions, and exploring the map and finding hidden locations.

    -

    We hope this article has helped you learn more about car parking multiplayer apk pc and how to play it on your computer or laptop. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you!

    -

    FAQs

    -

    Here are some frequently asked questions about car parking multiplayer apk pc:

    -

    What are the system requirements for car parking multiplayer apk pc?

    -

    The system requirements for car parking multiplayer apk pc depend on the emulator software you use to play it on your computer or laptop. However, generally speaking, you need a Windows 7/8/10 or Mac OS system with at least 4 GB of RAM, 5 GB of free disk space, a decent graphics card, and a stable internet connection.

    -

    How to update car parking multiplayer apk pc on your computer or laptop?

    -

    To update car parking multiplayer apk pc on your computer or laptop, you need to update it through the emulator software you use. For example, if you use BlueStacks, you need to open the Play Store app on the emulator and look for car parking multiplayer. If there is an update available, you will see an update button next to the game. Click it to download and install the latest version of the game. Similarly, if you use LDPlayer or any other emulator, you need to follow the same steps to update the game through the Play Store app on the emulator.

    -

    How to join or create a room in multiplayer mode?

    -

    To join or create a room in multiplayer mode, you need to click the multiplayer option on the main menu of the game. Then, you will see a list of rooms that are available to join. You can filter the rooms by map, mode, language, or region. You can also search for a specific room by its name or ID. To join a room, simply click on it and wait for it to load. To create a room, you need to click the create button on the top right corner of the screen. Then, you can choose the map, mode, name, password, and maximum number of players for your room. You can also invite your friends to join your room by sharing its name or ID with them.

    -

    How to chat with other players in the game?

    -

    To chat with other players in the game, you need to click the chat icon on the top left corner of the screen. Then, you will see a chat window where you can type and send messages to other players in your room or in the global chat. You can also use voice chat by clicking the microphone icon on the bottom right corner of the screen. However, you need to grant permission to the emulator software to access your microphone for this feature to work.

    -

    How to earn money and buy new cars in the game?

    -

    To earn money and buy new cars in the game, you need to complete levels and challenges in single player mode or multiplayer mode. You will get money as a reward for completing each level or challenge successfully. You can also get money by watching ads or buying it with real money through in-app purchases. To buy new cars in the game, you need to click the car icon on the top left corner of the screen. Then, you will see a list of cars that are available to buy with different prices and specifications. You can also preview each car before buying it by clicking the eye icon on the bottom right corner of the screen.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/European War 61914 MOD APK - How to Install and Play with All Unlocked.md b/spaces/1phancelerku/anime-remove-background/European War 61914 MOD APK - How to Install and Play with All Unlocked.md deleted file mode 100644 index 7e37840b1e79dd0a8711c7491a46a0e60c892c15..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/European War 61914 MOD APK - How to Install and Play with All Unlocked.md +++ /dev/null @@ -1,247 +0,0 @@ - -

    European War 6: 1914 Mod Apk Unlock All - A Guide for Strategy Game Fans

    -

    If you are a fan of strategy games that simulate historical wars, you might have heard of European War 6: 1914, a popular game developed by Easytech, a company that specializes in historical strategy games. In this game, you can choose from over 150 countries and regions, and lead them to victory or defeat in various wars and conflicts that took place between 1798 and 1950. You can also customize your own generals, troops, weapons, and technologies, and challenge other players online or offline.

    -

    european war 6 1914 mod apk unlock all


    Download Zip https://jinyurl.com/2uNLfF



    -

    However, some players may find the game too difficult, too expensive, or too boring after a while. That's why some of them resort to using a mod apk, which is a modified version of the original game application that can unlock all the features, resources, and content that are otherwise restricted or limited in the game. A mod apk can give you unlimited money, medals, generals, troops, weapons, technologies, and more. It can also remove ads, bugs, and errors that may affect your gameplay.

    -

    But is using a mod apk for European War 6: 1914 a good idea? What are the benefits and risks of doing so? How can you download and install a mod apk for European War 6: 1914? In this article, we will answer these questions and more. We will also provide you with some tips and tricks on how to use a mod apk for European War 6: 1914 safely and effectively. Read on to find out more!

    -

    What is European War 6: 1914 and what are its features?

    -

    European War 6: 1914 is a strategy game that simulates the historical wars of the 19th and 20th centuries. It is the sixth installment of the European War series, which started in 2010 with European War: Napoleon Wars. The game was released in 2020 for Android and iOS devices.

    -

    The game has four main modes: Campaign, Conquest, Challenge, and Multiplayer. In Campaign mode, you can follow the historical events and scenarios of different wars and regions, such as the Napoleonic Wars, the American Civil War, the World War I, the World War II, etc. You can choose from different countries and factions, and complete various missions and objectives to progress through the story. In Conquest mode, you can create your own scenarios and maps, and conquer the world with your own strategy and tactics. You can also adjust the difficulty level, the number of countries and regions, the resources and technologies available, etc. In Challenge mode, you can test your skills and knowledge in different quizzes and puzzles related to history and geography. You can also earn medals and rewards for completing them. In Multiplayer mode, you can play with or against other players online or offline via Wi-Fi or Bluetooth. You can also chat with them, send them gifts, or join alliances.

    -

    The game has over 150 countries and regions to choose from, each with their own unique generals, troops, weapons, and technologies. You can also customize your own generals by changing their names, portraits, skills, ranks, etc. You can also upgrade your troops by training them, equipping them with different weapons and armors, etc. You can also research new technologies by spending money and medals on them. The game has over 200 historical battles to fight in, each with their own terrain, weather, objectives, etc. You can also use different strategies and tactics to win them, such as diplomacy, espionage, sabotage, etc.

    -

    european war 6 1914 mod apk unlimited money and medals
    -european war 6 1914 hack mod apk free download
    -european war 6 1914 mod apk latest version
    -european war 6 1914 mod apk all generals unlocked
    -european war 6 1914 mod apk android 1
    -european war 6 1914 mod apk revdl
    -european war 6 1914 mod apk no root
    -european war 6 1914 mod apk offline
    -european war 6 1914 mod apk obb
    -european war 6 1914 mod apk rexdl
    -european war 6 1914 mod apk premium
    -european war 6 1914 mod apk full version
    -european war 6 1914 mod apk mega
    -european war 6 1914 mod apk data
    -european war 6 1914 mod apk vip
    -european war 6 1914 mod apk pro
    -european war 6 1914 mod apk cracked
    -european war 6 1914 mod apk cheat
    -european war 6 1914 mod apk hack download
    -european war 6 1914 mod apk update
    -european war 6 1914 mod apk new version
    -european war 6 1914 mod apk original
    -european war 6 1914 mod apk for pc
    -european war 6 1914 mod apk for ios
    -european war 6 1914 mod apk for windows
    -european war 6 1914 mod apk for mac
    -european war 6 1914 mod apk for laptop
    -european war 6 1914 mod apk for tablet
    -european war 6 1914 mod apk for chromebook
    -european war 6 1914 mod apk for android tv
    -european war 6: world at war - ww1 strategy game mod apk unlock all
    -easytech's world conquest games: ww1 ww2 civil war - all unlocked with mods and cheats
    -how to install and play european war: world at war - ww1 strategy game with mods and hacks on android devices
    -best tips and tricks for playing and winning in european war: world at war - ww1 strategy game with mods and hacks on android devices
    -how to get free money and medals in european war: world at war - ww1 strategy game with mods and hacks on android devices
    -how to unlock all generals and scenarios in european war: world at war - ww1 strategy game with mods and hacks on android devices
    -how to upgrade and customize your troops and weapons in european war: world at war - ww1 strategy game with mods and hacks on android devices
    -how to use diplomacy and alliances in european war: world at war - ww1 strategy game with mods and hacks on android devices
    -how to conquer the world and win the great wars in european war: world at war - ww1 strategy game with mods and hacks on android devices
    -how to play multiplayer mode in european war: world at war - ww1 strategy game with mods and hacks on android devices

    -

    The game has high-quality graphics that depict the historical scenes and characters in detail. The game also has realistic sound effects that enhance the atmosphere of war. The game has a user-friendly interface that allows you to control your units easily and efficiently. The game also has a tutorial mode that teaches you the basics of the game.

    -

    The game is similar to other historical strategy games such as Age of Civilizations II , Age of Empires, or Civilization. However, it has its own unique features and challenges that make it stand out from the crowd. If you are looking for a strategy game that combines historical accuracy, complexity, and fun, you might want to give European War 6: 1914 a try.

    -

    What is a mod apk and why do some players use it?

    -

    A mod apk is a modified version of an original game application that can alter or enhance some aspects of the game. A mod apk can be created by the game developers themselves, or by third-party programmers or hackers who have access to the game's source code. A mod apk can be downloaded from various websites or platforms, such as Google Play, App Store, or APKPure.

    -

    Some players use a mod apk for various reasons, such as:

    - -

    However, using a mod apk also comes with some legal and ethical issues, such as:

    - -

    Therefore, using a mod apk for European War 6: 1914 is a personal choice that depends on your own judgment and responsibility. You should weigh the pros and cons carefully before deciding to use a mod apk for European War 6: 1914.

    -

    What are the benefits of using a mod apk for European War 6: 1914?

    If you decide to use a mod apk for European War 6: 1914, you can enjoy some benefits that the original game may not offer. Here are some of them:

    - -

    To illustrate these benefits, here is a table that compares the features of the original game and the mod apk:

    -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Feature | Original Game | Mod Apk |
| --- | --- | --- |
| Money | Limited | Unlimited |
| Medals | Limited | Unlimited |
| Generals | Limited | Unlimited |
| Troops | Limited | Unlimited |
| Weapons | Limited | Unlimited |
| Technologies | Limited | Unlimited |
| Modes | Limited | All unlocked |
| Campaigns | Limited | All unlocked |
| Conquests | Limited | All unlocked |
| Challenges | Limited | All unlocked |
| Multiplayer | Limited | All unlocked |
| Ads | Present | Removed |
| Bugs and errors | Present | Fixed |
| Customization | Limited | Enhanced |
| New possibilities and scenarios | Limited | Added |
| Gameplay experience and enjoyment | Limited | Improved |
-

As you can see, using a mod apk for European War 6: 1914 can provide you with many benefits that can make your game more enjoyable and rewarding. However, you should also be aware of the risks and drawbacks of using a mod apk for European War 6: 1914, which we will discuss in the next section.

-

What are the risks and drawbacks of using a mod apk for European War 6: 1914?

-

Using a mod apk for European War 6: 1914 is not without its risks and drawbacks. Here are some of them:

- -

To illustrate these risks and drawbacks, here is a table that compares them with the original game and the mod apk:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Risk/Drawback | Original Game | Mod Apk |
| --- | --- | --- |
| Legal and ethical issues | None | Present |
| Viruses, malware, or scams | None | Possible |
| Balance and fairness | Present | Disrupted |
| Original design and intention | Present | Ruined |
| Official support and updates | Present | Lost |
| Ban or suspension | None | Possible |
-

As you can see, using a mod apk for European War 6: 1914 can also expose you to some risks and drawbacks that can make your game less enjoyable and rewarding. Therefore, you should be careful and cautious when using a mod apk for European War 6: 1914.

-

How to download and install a mod apk for European War 6: 1914?

-

If you still want to use a mod apk for European War 6: 1914, you need to know how to download and install it on your device. Here are the steps that you need to follow:

-
    -
1. Find a reliable source where you can download a mod apk for European War 6: 1914. You can search online for some websites or platforms that offer mod apks for various games, or you can ask other players who have used a mod apk for European War 6: 1914 before. However, you should be careful and wary of some sources that may contain viruses, malware, or scams that can harm your device or data.
2. Download the mod apk file from the source that you have chosen. You may need to allow your device to download files from unknown sources in your settings. You may also need to disable your antivirus or firewall software temporarily to avoid any interference.
3. Install the mod apk file on your device. You may need to uninstall the original game application first if you have it on your device. You may also need to enable the installation of apps from unknown sources in your settings. You may also need to grant some permissions or access to the mod apk file during the installation process.
4. Launch the mod apk file on your device. You may need to verify or activate the mod apk file by following some instructions or entering some codes. You may also need to create an account or log in with an existing one to access the mod apk file.
5. Enjoy the game with the mod apk file. You can now play European War 6: 1914 with all the features, resources, and content that are unlocked by the mod apk file. However, you should also be aware of the risks and drawbacks of using a mod apk file, as we discussed in the previous section.
-

To help you with finding a reliable source where you can download a mod apk for European War 6: 1914, here is a link that you can use as a reference:

-

European War 6: 1914 Mod Apk Unlock All - APKPure.com

-

This is a website that offers mod apks for various games, including European War 6: 1914. It claims that its mod apks are safe, tested, and verified by its users and editors. However, you should still be careful and cautious when downloading and installing any mod apk from any source, as there is no guarantee that they are free from viruses, malware, or scams.

-

Conclusion

-

In this article, we have discussed what European War 6: 1914 is and what are its features, what a mod apk is and why some players use it, what are the benefits and risks of using a mod apk for European War 6: 1914, and how to download and install a mod apk for European War 6: 1914. We have also provided you with some tips and tricks on how to use a mod apk for European War 6: 1914 safely and effectively.

-

We hope that this article has been helpful and informative for you. If you are a fan of strategy games that simulate historical wars, you might want to give European War 6: 1914 a try. However, if you decide to use a mod apk for European War 6: 1914, you should weigh the pros and cons carefully before doing so. You should also be responsible and respectful when playing the game with or without a mod apk.

-

We would love to hear your opinions, experiences, and feedback on European War 6: 1914 and its mod apk. Please feel free to share them with us in the comments section below. Thank you for reading and happy gaming!

-

FAQs

-

Here are some frequently asked questions about European War 6: 1914 and its mod apk, along with their answers:

-

Q: Is European War 6: 1914 free to play?

-

A: Yes, European War 6: 1914 is free to download and play on Android and iOS devices. However, the game may contain some in-app purchases or ads that may require real money or interrupt the gameplay.

-

Q: Is using a mod apk for European War 6: 1914 legal?

-

A: No, using a mod apk for European War 6: 1914 is not legal, as it violates the terms and conditions of the game developers or publishers, and infringes their intellectual property rights. Using a mod apk for European War 6: 1914 may result in legal actions or penalties against you.

-

Q: Is using a mod apk for European War 6: 1914 safe?

-

A: No, using a mod apk for European War 6: 1914 is not safe, as it exposes your device or data to viruses, malware, or scams that can harm them. Using a mod apk for European War 6: 1914 may also make your game incompatible with the official updates or patches, or lose access to the official online services or features.

-

Q: Is using a mod apk for European War 6: 1914 fair?

-

A: No, using a mod apk for European War 6: 1914 is not fair, as it disrupts the balance and fairness of the game for other players who play the game legitimately. Using a mod apk for European War 6: 1914 may also encounter other players who use mod apks to cheat or hack the game.

-

Q: Is using a mod apk for European War 6: 1914 fun?

-

A: It depends on your personal preference and judgment. Some players may find using a mod apk for European War 6: 1914 fun, as it unlocks all the features, resources, and content that are otherwise restricted or limited in the game. However, some players may find using a mod apk for European War 6: 1914 boring, as it removes the challenge and achievement that come with playing the game legitimately.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_img2img.py b/spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_img2img.py deleted file mode 100644 index 73b303700e17d247aa9b0fab5882938b1216daf4..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_img2img.py +++ /dev/null @@ -1,458 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Callable, List, Optional, Union - -import numpy as np -import paddle -import PIL - -from paddlenlp.transformers import CLIPFeatureExtractor, CLIPTokenizer - -from ...fastdeploy_utils import FastDeployRuntimeModel -from ...pipeline_utils import DiffusionPipeline -from ...schedulers import ( - DDIMScheduler, - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - LMSDiscreteScheduler, - PNDMScheduler, -) -from ...utils import PIL_INTERPOLATION, logging -from . import StableDiffusionPipelineOutput - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess -def preprocess(image): - if isinstance(image, paddle.Tensor): - return image - elif isinstance(image, PIL.Image.Image): - image = [image] - - if isinstance(image[0], PIL.Image.Image): - w, h = image[0].size - w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 - - image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] - image = np.concatenate(image, axis=0) - image = np.array(image).astype(np.float32) / 255.0 - image = image.transpose(0, 3, 1, 2) - image = 2.0 * image - 1.0 - image = paddle.to_tensor(image) - elif isinstance(image[0], paddle.Tensor): - image = paddle.concat(image, axis=0) - return image - - -class FastDeployStableDiffusionImg2ImgPipeline(DiffusionPipeline): - r""" - Pipeline for text-guided image-to-image generation using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving etc.) - - Args: - vae_encoder ([`FastDeployRuntimeModel`]): - Variational Auto-Encoder (VAE) Model to encode images to latent representations. - vae_decoder ([`FastDeployRuntimeModel`]): - Variational Auto-Encoder (VAE) Model to decode images from latent representations. - text_encoder ([`FastDeployRuntimeModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. 
- tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`FastDeployRuntimeModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`PNDMScheduler`], [`EulerDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`] - or [`DPMSolverMultistepScheduler`]. - safety_checker ([`FastDeployRuntimeModel`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae_encoder: FastDeployRuntimeModel, - vae_decoder: FastDeployRuntimeModel, - text_encoder: FastDeployRuntimeModel, - tokenizer: CLIPTokenizer, - unet: FastDeployRuntimeModel, - scheduler: Union[ - DDIMScheduler, - PNDMScheduler, - LMSDiscreteScheduler, - EulerDiscreteScheduler, - EulerAncestralDiscreteScheduler, - DPMSolverMultistepScheduler, - ], - safety_checker: FastDeployRuntimeModel, - feature_extractor: CLIPFeatureExtractor, - requires_safety_checker: bool = True, - ): - super().__init__() - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. PaddleNLP team, diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - self.register_modules( - vae_encoder=vae_encoder, - vae_decoder=vae_decoder, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `list(int)`): - prompt to be encoded - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). 
- """ - batch_size = len(prompt) if isinstance(prompt, list) else 1 - - # get prompt text embeddings - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="np", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="np").input_ids - - if not np.array_equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - text_embeddings = self.text_encoder(input_ids=text_input_ids.astype(np.int64))[0] - text_embeddings = np.repeat(text_embeddings, num_images_per_prompt, axis=0) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] * batch_size - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="np", - ) - uncond_embeddings = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int64))[0] - uncond_embeddings = np.repeat(uncond_embeddings, num_images_per_prompt, axis=0) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = np.concatenate([uncond_embeddings, text_embeddings]) - - return text_embeddings - - def run_safety_checker(self, image, dtype): - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor( - self.numpy_to_pil(image), return_tensors="np" - ).pixel_values.astype(dtype) - # There will throw an error if use safety_checker batchsize>1 - images, has_nsfw_concept = [], [] - for i in range(image.shape[0]): - image_i, has_nsfw_concept_i = self.safety_checker( - clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] - ) - images.append(image_i) - has_nsfw_concept.append(has_nsfw_concept_i[0]) - image = np.concatenate(images) - else: - has_nsfw_concept = None - return image, has_nsfw_concept - - def decode_latents(self, latents): - latents = 1 / 0.18215 * latents - image = np.concatenate( - [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] - ) - image = np.clip(image / 2 + 0.5, 0, 1) - image = image.transpose([0, 2, 3, 1]) - return image - - def prepare_extra_step_kwargs(self, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - return extra_step_kwargs - - def check_inputs(self, prompt, strength, callback_steps): - if not isinstance(prompt, str) and not isinstance(prompt, list): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [1.0, 1.0] but is {strength}") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - def get_timesteps(self, num_inference_steps, strength): - # get the original timestep using init_timestep - offset = self.scheduler.config.get("steps_offset", 0) - init_timestep = int(num_inference_steps * strength) + offset - init_timestep = min(init_timestep, num_inference_steps) - - t_start = max(num_inference_steps - init_timestep + offset, 0) - timesteps = self.scheduler.timesteps - timesteps = timesteps[t_start:] - return timesteps, num_inference_steps - t_start - - def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, generator=None, noise=None): - if generator is None: - generator = np.random - - image = image.astype(dtype) - init_latents = self.vae_encoder(sample=image)[0] - init_latents = 0.18215 * init_latents - - if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: - raise ValueError( - f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." - ) - else: - init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) - - # add noise to latents using the timesteps - if noise is None: - noise = paddle.to_tensor(generator.randn(*init_latents.shape).astype(dtype)) - elif list(noise.shape) != list(init_latents.shape): - raise ValueError(f"Unexpected noise shape, got {noise.shape}, expected {init_latents.shape}") - elif isinstance(noise, np.ndarray): - noise = paddle.to_tensor(noise, dtype=dtype) - - # get latents - init_latents = self.scheduler.add_noise(paddle.to_tensor(init_latents), noise, timestep) - latents = init_latents - - return latents - - def __call__( - self, - prompt: Union[str, List[str]], - image: Union[np.ndarray, PIL.Image.Image] = None, - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: Optional[float] = 0.0, - generator: Optional[np.random.RandomState] = None, - noise: Optional[np.ndarray] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, np.ndarray], None]] = None, - callback_steps: Optional[int] = 1, - ): - r""" - Function invoked when calling the pipeline for generation. - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - image (`np.ndarray` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. 
- strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. - `image` will be used as a starting point, adding more noise to it the larger the `strength`. The - number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added - noise will be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. This parameter will be modulated by `strength`. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`np.random.RandomState`, *optional*): - A np.random.RandomState to make generation deterministic. - noise (`np.ndarray`, *optional*): - Pre-generated noise tensor, sampled from a Gaussian distribution, to be used as inputs for image - generation. If not provided, a noise tensor will ge generated by sampling using the supplied random - `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - # 1. Check inputs - self.check_inputs(prompt, strength, callback_steps) - - # 2. 
Define call parameters - batch_size = 1 if isinstance(prompt, str) else len(prompt) - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - text_embeddings = self._encode_prompt( - prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt - ) - - # 4. Preprocess image - image = preprocess(image) - - # 5. set timesteps - self.scheduler.set_timesteps(num_inference_steps) - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength) - latent_timestep = timesteps[:1].tile([batch_size * num_images_per_prompt]) - - # 6. Prepare latent variables - latents = self.prepare_latents( - image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, generator, noise - ) - - # 7. Prepare extra step kwargs. - extra_step_kwargs = self.prepare_extra_step_kwargs(eta) - - # 8. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - text_embeddings = paddle.to_tensor(text_embeddings) - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet.zero_copy_infer( - sample=latent_model_input, timestep=t, encoder_hidden_states=text_embeddings - )[0] - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - scheduler_output = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs) - latents = scheduler_output.prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # 9. Post-processing - image = self.decode_latents(latents.numpy()) - - # 10. Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image, text_embeddings.dtype) - - # 11. 
Convert to PIL - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/2023Liu2023/bingo/src/lib/hooks/use-at-bottom.tsx b/spaces/2023Liu2023/bingo/src/lib/hooks/use-at-bottom.tsx deleted file mode 100644 index d37c8cf4162adcb0064e08ecec24eb731416b045..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/src/lib/hooks/use-at-bottom.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import * as React from 'react' - -export function useAtBottom(offset = 0) { - const [isAtBottom, setIsAtBottom] = React.useState(false) - - React.useEffect(() => { - const handleScroll = () => { - setIsAtBottom( - window.innerHeight + window.scrollY >= - document.body.offsetHeight - offset - ) - } - - window.addEventListener('scroll', handleScroll, { passive: true }) - handleScroll() - - return () => { - window.removeEventListener('scroll', handleScroll) - } - }, [offset]) - - return isAtBottom -} diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval/__init__.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/4Taps/SadTalker/src/face3d/options/test_options.py b/spaces/4Taps/SadTalker/src/face3d/options/test_options.py deleted file mode 100644 index 4ff3ad142779850d1d5a1640bc00f70d34d4a862..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/options/test_options.py +++ /dev/null @@ -1,21 +0,0 @@ -"""This script contains the test options for Deep3DFaceRecon_pytorch -""" - -from .base_options import BaseOptions - - -class TestOptions(BaseOptions): - """This class includes test options. - - It also includes shared options defined in BaseOptions. - """ - - def initialize(self, parser): - parser = BaseOptions.initialize(self, parser) # define shared options - parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') - parser.add_argument('--dataset_mode', type=str, default=None, help='chooses how datasets are loaded. [None | flist]') - parser.add_argument('--img_folder', type=str, default='examples', help='folder for test images.') - - # Dropout and Batchnorm has different behavior during training and test. 
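-        # isTrain=False marks these as test-time options: downstream code is expected to put the
-        # networks into eval mode, so dropout is disabled and BatchNorm uses its running statistics.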
- self.isTrain = False - return parser diff --git a/spaces/656-156/Real-CUGAN/upcunet_v3.py b/spaces/656-156/Real-CUGAN/upcunet_v3.py deleted file mode 100644 index f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000 --- a/spaces/656-156/Real-CUGAN/upcunet_v3.py +++ /dev/null @@ -1,714 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F -import os, sys -import numpy as np - -root_path = os.path.abspath('.') -sys.path.append(root_path) - - -class SEBlock(nn.Module): - def __init__(self, in_channels, reduction=8, bias=False): - super(SEBlock, self).__init__() - self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias) - self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias) - - def forward(self, x): - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half() - else: - x0 = torch.mean(x, dim=(2, 3), keepdim=True) - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - def forward_mean(self, x, x0): - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - -class UNetConv(nn.Module): - def __init__(self, in_channels, mid_channels, out_channels, se): - super(UNetConv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(in_channels, mid_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - nn.Conv2d(mid_channels, out_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - ) - if se: - self.seblock = SEBlock(out_channels, reduction=8, bias=True) - else: - self.seblock = None - - def forward(self, x): - z = self.conv(x) - if self.seblock is not None: - z = self.seblock(z) - return z - - -class UNet1(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet1x3(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1x3, self).__init__() - self.conv1 = 
UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet2(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet2, self).__init__() - - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 64, 128, se=True) - self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0) - self.conv3 = UNetConv(128, 256, 128, se=True) - self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0) - self.conv4 = UNetConv(128, 64, 64, se=True) - self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv5 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3(x3) - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4(x2 + x3) - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - def forward_a(self, x): # conv234结尾有se - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x2): # conv234结尾有se - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3.conv(x3) - return x3 - - def forward_c(self, x2, x3): # conv234结尾有se - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4.conv(x2 + x3) - return x4 - - def forward_d(self, x1, x4): 
# conv234结尾有se - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - -class UpCunet2x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet2x, self).__init__() - self.unet1 = UNet1(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 36, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - 
tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 36, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2] - return res # - - -class UpCunet3x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet3x, self).__init__() - self.unet1 = UNet1x3(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 4 + 1) * 4 - pw = ((w0 - 1) // 4 + 1) * 4 - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_h = (h0 - 1) // 4 * 4 + 4 # 能被4整除 - else: - crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_w = (w0 - 1) // 4 * 4 + 4 # 能被4整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() 
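-        # Lossless-tiling trick, as in UpCunet2x above: the loops below first accumulate per-tile
-        # channel means into se_mean0/se_mean1, average them over n_patch tiles, and only then apply
-        # them via each SEBlock's forward_mean, so the channel attention is driven by statistics from
-        # the whole padded image rather than from each tile in isolation.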
- n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 28, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 28, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop # - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3] - return res - - -class 
UpCunet4x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet4x, self).__init__() - self.unet1 = UNet1(in_channels, 64, deconv=True) - self.unet2 = UNet2(64, 64, deconv=False) - self.ps = nn.PixelShuffle(2) - self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True) - - def forward(self, x, tile_mode): - n, c, h0, w0 = x.shape - x00 = x - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - x = self.conv_final(x) - x = F.pad(x, (-1, -1, -1, -1)) - x = self.ps(x) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4] - x += F.interpolate(x00, scale_factor=4, mode='nearest') - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 38, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - 
if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 38, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - x_crop = self.conv_final(x_crop) - x_crop = F.pad(x_crop, (-1, -1, -1, -1)) - x_crop = self.ps(x_crop) - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape) - res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4] - res += F.interpolate(x00, scale_factor=4, mode='nearest') - return res # - - -class RealWaifuUpScaler(object): - def __init__(self, scale, weight_path, half, device): - weight = torch.load(weight_path, map_location="cpu") - self.model = eval("UpCunet%sx" % scale)() - if (half == True): - self.model = self.model.half().to(device) - else: - self.model = self.model.to(device) - self.model.load_state_dict(weight, strict=True) - self.model.eval() - self.half = half - self.device = device - - def np2tensor(self, np_frame): - if (self.half == False): - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255 - else: - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255 - - def tensor2np(self, tensor): - if (self.half == False): - return ( - np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0))) - else: - return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), - (1, 2, 0))) - - def __call__(self, frame, tile_mode): - with torch.no_grad(): - tensor = self.np2tensor(frame) - result = self.tensor2np(self.model(tensor, tile_mode)) - return result - - -if __name__ == "__main__": - ###########inference_img - import time, cv2, sys - from time import time as ttime - - for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3), - 
("weights_v3/up4x-latest-denoise3x.pth", 4)]: - for tile_mode in [0, 1, 2, 3, 4]: - upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0") - input_dir = "%s/input_dir1" % root_path - output_dir = "%s/opt-dir-all-test" % root_path - os.makedirs(output_dir, exist_ok=True) - for name in os.listdir(input_dir): - print(name) - tmp = name.split(".") - inp_path = os.path.join(input_dir, name) - suffix = tmp[-1] - prefix = ".".join(tmp[:-1]) - tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - print(inp_path, tmp_path) - # 支持中文路径 - # os.link(inp_path, tmp_path)#win用硬链接 - os.symlink(inp_path, tmp_path) # linux用软链接 - frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]] - t0 = ttime() - result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1] - t1 = ttime() - print(prefix, "done", t1 - t0) - tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - cv2.imwrite(tmp_opt_path, result) - n = 0 - while (1): - if (n == 0): - suffix = "_%sx_tile%s.png" % (scale, tile_mode) - else: - suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) # - if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False): - break - else: - n += 1 - final_opt_path = os.path.join(output_dir, prefix + suffix) - os.rename(tmp_opt_path, final_opt_path) - os.remove(tmp_path) diff --git a/spaces/801artistry/RVC801/julius/fftconv.py b/spaces/801artistry/RVC801/julius/fftconv.py deleted file mode 100644 index 1920e5369bb49b76eeea1832b7be2a0ddbc8db6b..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/julius/fftconv.py +++ /dev/null @@ -1,183 +0,0 @@ -# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. -# Author: adefossez, 2020 - -""" -Implementation of a FFT based 1D convolution in PyTorch. -While FFT is used in CUDNN for small kernel sizes, it is not the case for long ones, e.g. 512. -This module implements efficient FFT based convolutions for such convolutions. A typical -application is for evaluationg FIR filters with a long receptive field, typically -evaluated with a stride of 1. -""" -from typing import Optional - -import torch -try: - import torch.fft as new_fft -except ImportError: - new_fft = None # type: ignore -from torch.nn import functional as F - -from .core import pad_to, unfold -from .utils import simple_repr - - -# This is quite verbose, but sadly needed to make TorchScript happy. -def _new_rfft(x: torch.Tensor): - z = new_fft.rfft(x, dim=-1) - return torch.view_as_real(z) - - -def _old_rfft(x: torch.Tensor): - return torch.rfft(x, 1) # type: ignore - - -def _old_irfft(x: torch.Tensor, length: int): - result = torch.irfft(x, 1, signal_sizes=(length,)) # type: ignore - return result - - -def _new_irfft(x: torch.Tensor, length: int): - x = torch.view_as_complex(x) - return new_fft.irfft(x, length, dim=-1) - - -if new_fft is None: - _rfft = _old_rfft - _irfft = _old_irfft -else: - _rfft = _new_rfft - _irfft = _new_irfft - - -def _compl_mul_conjugate(a: torch.Tensor, b: torch.Tensor): - """ - Given a and b two tensors of dimension 4 - with the last dimension being the real and imaginary part, - returns a multiplied by the conjugate of b, the multiplication - being with respect to the second dimension. - - """ - # PyTorch 1.7 supports complex number, but not for all operations. - # Once the support is widespread, this can likely go away. 
- - op = "bcft,dct->bdft" - return torch.stack([ - torch.einsum(op, a[..., 0], b[..., 0]) + torch.einsum(op, a[..., 1], b[..., 1]), - torch.einsum(op, a[..., 1], b[..., 0]) - torch.einsum(op, a[..., 0], b[..., 1]) - ], - dim=-1) - - -def fft_conv1d( - input: torch.Tensor, weight: torch.Tensor, - bias: Optional[torch.Tensor] = None, stride: int = 1, padding: int = 0, - block_ratio: float = 5): - """ - Same as `torch.nn.functional.conv1d` but using FFT for the convolution. - Please check PyTorch documentation for more information. - - Args: - input (Tensor): input signal of shape `[B, C, T]`. - weight (Tensor): weight of the convolution `[D, C, K]` with `D` the number - of output channels. - bias (Tensor or None): if not None, bias term for the convolution. - stride (int): stride of convolution. - padding (int): padding to apply to the input. - block_ratio (float): can be tuned for speed. The input is splitted in chunks - with a size of `int(block_ratio * kernel_size)`. - - Shape: - - - Inputs: `input` is `[B, C, T]`, `weight` is `[D, C, K]` and bias is `[D]`. - - Output: `(*, T)` - - - ..note:: - This function is faster than `torch.nn.functional.conv1d` only in specific cases. - Typically, the kernel size should be of the order of 256 to see any real gain, - for a stride of 1. - - ..Warning:: - Dilation and groups are not supported at the moment. This function might use - more memory than the default Conv1d implementation. - """ - input = F.pad(input, (padding, padding)) - batch, channels, length = input.shape - out_channels, _, kernel_size = weight.shape - - if length < kernel_size: - raise RuntimeError(f"Input should be at least as large as the kernel size {kernel_size}, " - f"but it is only {length} samples long.") - if block_ratio < 1: - raise RuntimeError("Block ratio must be greater than 1.") - - # We are going to process the input blocks by blocks, as for some reason it is faster - # and less memory intensive (I think the culprit is `torch.einsum`. - block_size: int = min(int(kernel_size * block_ratio), length) - fold_stride = block_size - kernel_size + 1 - weight = pad_to(weight, block_size) - weight_z = _rfft(weight) - - # We pad the input and get the different frames, on which - frames = unfold(input, block_size, fold_stride) - - frames_z = _rfft(frames) - out_z = _compl_mul_conjugate(frames_z, weight_z) - out = _irfft(out_z, block_size) - # The last bit is invalid, because FFT will do a circular convolution. - out = out[..., :-kernel_size + 1] - out = out.reshape(batch, out_channels, -1) - out = out[..., ::stride] - target_length = (length - kernel_size) // stride + 1 - out = out[..., :target_length] - if bias is not None: - out += bias[:, None] - return out - - -class FFTConv1d(torch.nn.Module): - """ - Same as `torch.nn.Conv1d` but based on `fft_conv1d`. - Please check PyTorch documentation for more information. - - Args: - in_channels (int): number of input channels. - out_channels (int): number of output channels. - kernel_size (int): kernel size of convolution. - stride (int): stride of convolution. - padding (int): padding to apply to the input. - bias (bool): if True, use a bias term. - - ..note:: - This module is faster than `torch.nn.Conv1d` only in specific cases. - Typically, `kernel_size` should be of the order of 256 to see any real gain, - for a stride of 1. - - ..warning:: - Dilation and groups are not supported at the moment. This module might use - more memory than the default Conv1d implementation. 
- - >>> fftconv = FFTConv1d(12, 24, 128, 4) - >>> x = torch.randn(4, 12, 1024) - >>> print(list(fftconv(x).shape)) - [4, 24, 225] - """ - def __init__(self, in_channels: int, out_channels: int, kernel_size: int, - stride: int = 1, padding: int = 0, bias: bool = True): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.stride = stride - self.padding = padding - - conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size, bias=bias) - self.weight = conv.weight - self.bias = conv.bias - - def forward(self, input: torch.Tensor): - return fft_conv1d( - input, self.weight, self.bias, self.stride, self.padding) - - def __repr__(self): - return simple_repr(self, overrides={"bias": self.bias is not None}) diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/dataset_utils.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/dataset_utils.py deleted file mode 100644 index 9e31ce3aba637a5c373caf1559310ec029338533..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/dataset_utils.py +++ /dev/null @@ -1,259 +0,0 @@ -from utils.cwt import get_lf0_cwt -import torch.optim -import torch.utils.data -import importlib -from utils.indexed_datasets import IndexedDataset -from utils.pitch_utils import norm_interp_f0, denorm_f0, f0_to_coarse -import numpy as np -from tasks.base_task import BaseDataset -import torch -import torch.optim -import torch.utils.data -import utils -import torch.distributions -from utils.hparams import hparams -from resemblyzer import VoiceEncoder -import json -from data_gen.tts.data_gen_utils import build_phone_encoder - -class BaseTTSDataset(BaseDataset): - def __init__(self, prefix, shuffle=False, test_items=None, test_sizes=None, data_dir=None): - super().__init__(shuffle) - self.data_dir = hparams['binary_data_dir'] if data_dir is None else data_dir - self.prefix = prefix - self.hparams = hparams - self.indexed_ds = None - self.ext_mel2ph = None - - def load_size(): - self.sizes = np.load(f'{self.data_dir}/{self.prefix}_lengths.npy') - - if prefix == 'test': - if test_items is not None: - self.indexed_ds, self.sizes = test_items, test_sizes - else: - load_size() - if hparams['num_test_samples'] > 0: - self.avail_idxs = [x for x in range(hparams['num_test_samples']) \ - if x < len(self.sizes)] - if len(hparams['test_ids']) > 0: - self.avail_idxs = hparams['test_ids'] + self.avail_idxs - else: - self.avail_idxs = list(range(len(self.sizes))) - else: - load_size() - self.avail_idxs = list(range(len(self.sizes))) - - if hparams['min_frames'] > 0: - self.avail_idxs = [ - x for x in self.avail_idxs if self.sizes[x] >= hparams['min_frames']] - self.sizes = [self.sizes[i] for i in self.avail_idxs] - - def _get_item(self, index): - if hasattr(self, 'avail_idxs') and self.avail_idxs is not None: - index = self.avail_idxs[index] - if self.indexed_ds is None: - self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') - return self.indexed_ds[index] - - def __getitem__(self, index): - hparams = self.hparams - item = self._get_item(index) - assert len(item['mel']) == self.sizes[index], (len(item['mel']), self.sizes[index]) - max_frames = hparams['max_frames'] - spec = torch.Tensor(item['mel'])[:max_frames] - max_frames = spec.shape[0] // hparams['frames_multiple'] * hparams['frames_multiple'] - spec = spec[:max_frames] - phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']]) - sample = { - "id": index, - "item_name": item['item_name'], - "text": 
item['txt'], - "txt_token": phone, - "mel": spec, - "mel_nonpadding": spec.abs().sum(-1) > 0, - } - if hparams['use_spk_embed']: - sample["spk_embed"] = torch.Tensor(item['spk_embed']) - if hparams['use_spk_id']: - sample["spk_id"] = int(item['spk_id']) - return sample - - def collater(self, samples): - if len(samples) == 0: - return {} - hparams = self.hparams - id = torch.LongTensor([s['id'] for s in samples]) - item_names = [s['item_name'] for s in samples] - text = [s['text'] for s in samples] - txt_tokens = utils.collate_1d([s['txt_token'] for s in samples], 0) - mels = utils.collate_2d([s['mel'] for s in samples], 0.0) - txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples]) - mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples]) - - batch = { - 'id': id, - 'item_name': item_names, - 'nsamples': len(samples), - 'text': text, - 'txt_tokens': txt_tokens, - 'txt_lengths': txt_lengths, - 'mels': mels, - 'mel_lengths': mel_lengths, - } - - if hparams['use_spk_embed']: - spk_embed = torch.stack([s['spk_embed'] for s in samples]) - batch['spk_embed'] = spk_embed - if hparams['use_spk_id']: - spk_ids = torch.LongTensor([s['spk_id'] for s in samples]) - batch['spk_ids'] = spk_ids - return batch - - -class FastSpeechDataset(BaseTTSDataset): - def __init__(self, prefix, shuffle=False, test_items=None, test_sizes=None, data_dir=None): - super().__init__(prefix, shuffle, test_items, test_sizes, data_dir) - self.f0_mean, self.f0_std = hparams.get('f0_mean', None), hparams.get('f0_std', None) - if prefix == 'test' and hparams['test_input_dir'] != '': - self.data_dir = hparams['test_input_dir'] - self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}') - self.indexed_ds = sorted(self.indexed_ds, key=lambda item: item['item_name']) - items = {} - for i in range(len(self.indexed_ds)): - speaker = self.indexed_ds[i]['item_name'].split('_')[0] - if speaker not in items.keys(): - items[speaker] = [i] - else: - items[speaker].append(i) - sort_item = sorted(items.values(), key=lambda item_pre_speaker: len(item_pre_speaker), reverse=True) - self.avail_idxs = [n for a in sort_item for n in a][:hparams['num_test_samples']] - self.indexed_ds, self.sizes = self.load_test_inputs() - self.avail_idxs = [i for i in range(hparams['num_test_samples'])] - - if hparams['pitch_type'] == 'cwt': - _, hparams['cwt_scales'] = get_lf0_cwt(np.ones(10)) - - def __getitem__(self, index): - sample = super(FastSpeechDataset, self).__getitem__(index) - item = self._get_item(index) - hparams = self.hparams - max_frames = hparams['max_frames'] - spec = sample['mel'] - T = spec.shape[0] - phone = sample['txt_token'] - sample['energy'] = (spec.exp() ** 2).sum(-1).sqrt() - sample['mel2ph'] = mel2ph = torch.LongTensor(item['mel2ph'])[:T] if 'mel2ph' in item else None - if hparams['use_pitch_embed']: - assert 'f0' in item - if hparams.get('normalize_pitch', False): - f0 = item["f0"] - if len(f0 > 0) > 0 and f0[f0 > 0].std() > 0: - f0[f0 > 0] = (f0[f0 > 0] - f0[f0 > 0].mean()) / f0[f0 > 0].std() * hparams['f0_std'] + \ - hparams['f0_mean'] - f0[f0 > 0] = f0[f0 > 0].clip(min=60, max=500) - pitch = f0_to_coarse(f0) - pitch = torch.LongTensor(pitch[:max_frames]) - else: - pitch = torch.LongTensor(item.get("pitch"))[:max_frames] if "pitch" in item else None - f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams) - uv = torch.FloatTensor(uv) - f0 = torch.FloatTensor(f0) - if hparams['pitch_type'] == 'cwt': - cwt_spec = torch.Tensor(item['cwt_spec'])[:max_frames] - f0_mean = item.get('f0_mean', 
item.get('cwt_mean')) - f0_std = item.get('f0_std', item.get('cwt_std')) - sample.update({"cwt_spec": cwt_spec, "f0_mean": f0_mean, "f0_std": f0_std}) - elif hparams['pitch_type'] == 'ph': - if "f0_ph" in item: - f0 = torch.FloatTensor(item['f0_ph']) - else: - f0 = denorm_f0(f0, None, hparams) - f0_phlevel_sum = torch.zeros_like(phone).float().scatter_add(0, mel2ph - 1, f0) - f0_phlevel_num = torch.zeros_like(phone).float().scatter_add( - 0, mel2ph - 1, torch.ones_like(f0)).clamp_min(1) - f0_ph = f0_phlevel_sum / f0_phlevel_num - f0, uv = norm_interp_f0(f0_ph, hparams) - else: - f0 = uv = torch.zeros_like(mel2ph) - pitch = None - sample["f0"], sample["uv"], sample["pitch"] = f0, uv, pitch - if hparams['use_spk_embed']: - sample["spk_embed"] = torch.Tensor(item['spk_embed']) - if hparams['use_spk_id']: - sample["spk_id"] = item['spk_id'] - return sample - - def collater(self, samples): - if len(samples) == 0: - return {} - hparams = self.hparams - batch = super(FastSpeechDataset, self).collater(samples) - f0 = utils.collate_1d([s['f0'] for s in samples], 0.0) - pitch = utils.collate_1d([s['pitch'] for s in samples]) if samples[0]['pitch'] is not None else None - uv = utils.collate_1d([s['uv'] for s in samples]) - energy = utils.collate_1d([s['energy'] for s in samples], 0.0) - mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) \ - if samples[0]['mel2ph'] is not None else None - batch.update({ - 'mel2ph': mel2ph, - 'energy': energy, - 'pitch': pitch, - 'f0': f0, - 'uv': uv, - }) - if hparams['pitch_type'] == 'cwt': - cwt_spec = utils.collate_2d([s['cwt_spec'] for s in samples]) - f0_mean = torch.Tensor([s['f0_mean'] for s in samples]) - f0_std = torch.Tensor([s['f0_std'] for s in samples]) - batch.update({'cwt_spec': cwt_spec, 'f0_mean': f0_mean, 'f0_std': f0_std}) - return batch - - def load_test_inputs(self): - binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizerr.BaseBinarizer') - pkg = ".".join(binarizer_cls.split(".")[:-1]) - cls_name = binarizer_cls.split(".")[-1] - binarizer_cls = getattr(importlib.import_module(pkg), cls_name) - ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json" - ph_set = json.load(open(ph_set_fn, 'r')) - print("| phone set: ", ph_set) - phone_encoder = build_phone_encoder(hparams['binary_data_dir']) - word_encoder = None - voice_encoder = VoiceEncoder().cuda() - encoder = [phone_encoder, word_encoder] - sizes = [] - items = [] - for i in range(len(self.avail_idxs)): - item = self._get_item(i) - - item2tgfn = f"{hparams['test_input_dir'].replace('binary', 'processed')}/mfa_outputs/{item['item_name']}.TextGrid" - item = binarizer_cls.process_item(item['item_name'], item['ph'], item['txt'], item2tgfn, - item['wav_fn'], item['spk_id'], encoder, hparams['binarization_args']) - item['spk_embed'] = voice_encoder.embed_utterance(item['wav']) \ - if hparams['binarization_args']['with_spk_embed'] else None # 判断是否保存embedding文件 - items.append(item) - sizes.append(item['len']) - return items, sizes - -class FastSpeechWordDataset(FastSpeechDataset): - def __getitem__(self, index): - sample = super(FastSpeechWordDataset, self).__getitem__(index) - item = self._get_item(index) - max_frames = hparams['max_frames'] - sample["ph_words"] = item["ph_words"] - sample["word_tokens"] = torch.LongTensor(item["word_tokens"]) - sample["mel2word"] = torch.LongTensor(item.get("mel2word"))[:max_frames] - sample["ph2word"] = torch.LongTensor(item['ph2word'][:hparams['max_input_tokens']]) - return sample - - def collater(self, samples): - batch = 
super(FastSpeechWordDataset, self).collater(samples) - ph_words = [s['ph_words'] for s in samples] - batch['ph_words'] = ph_words - word_tokens = utils.collate_1d([s['word_tokens'] for s in samples], 0) - batch['word_tokens'] = word_tokens - mel2word = utils.collate_1d([s['mel2word'] for s in samples], 0) - batch['mel2word'] = mel2word - ph2word = utils.collate_1d([s['ph2word'] for s in samples], 0) - batch['ph2word'] = ph2word - return batch diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/metrics.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/metrics.py deleted file mode 100644 index 16905224c665491b9869d7641c1fe17689816a4b..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/metrics.py +++ /dev/null @@ -1,69 +0,0 @@ -import logging - -import numpy as np -import scipy -import torch -from sklearn.metrics import average_precision_score, roc_auc_score - -logger = logging.getLogger(f'main.{__name__}') - -def metrics(targets, outputs, topk=(1, 5)): - """ - Adapted from https://github.com/hche11/VGGSound/blob/master/utils.py - - Calculate statistics including mAP, AUC, and d-prime. - Args: - output: 2d tensors, (dataset_size, classes_num) - before softmax - target: 1d tensors, (dataset_size, ) - topk: tuple - Returns: - metric_dict: a dict of metrics - """ - metrics_dict = dict() - - num_cls = outputs.shape[-1] - - # accuracy@k - _, preds = torch.topk(outputs, k=max(topk), dim=1) - correct_for_maxtopk = preds == targets.view(-1, 1).expand_as(preds) - for k in topk: - metrics_dict[f'accuracy_{k}'] = float(correct_for_maxtopk[:, :k].sum() / correct_for_maxtopk.shape[0]) - - # avg precision, average roc_auc, and dprime - targets = torch.nn.functional.one_hot(targets, num_classes=num_cls) - - # ids of the predicted classes (same as softmax) - targets_pred = torch.softmax(outputs, dim=1) - - targets = targets.numpy() - targets_pred = targets_pred.numpy() - - # one-vs-rest - avg_p = [average_precision_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)] - try: - roc_aucs = [roc_auc_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)] - except ValueError: - logger.warning('Weird... Some classes never occured in targets. Do not trust the metrics.') - roc_aucs = np.array([0.5]) - avg_p = np.array([0]) - - metrics_dict['mAP'] = np.mean(avg_p) - metrics_dict['mROCAUC'] = np.mean(roc_aucs) - # Percent point function (ppf) (inverse of cdf — percentiles). - metrics_dict['dprime'] = scipy.stats.norm().ppf(metrics_dict['mROCAUC']) * np.sqrt(2) - - return metrics_dict - - -if __name__ == '__main__': - targets = torch.tensor([3, 3, 1, 2, 1, 0]) - outputs = torch.tensor([ - [1.2, 1.3, 1.1, 1.5], - [1.3, 1.4, 1.0, 1.1], - [1.5, 1.1, 1.4, 1.3], - [1.0, 1.2, 1.4, 1.5], - [1.2, 1.3, 1.1, 1.1], - [1.2, 1.1, 1.1, 1.1], - ]).float() - metrics_dict = metrics(targets, outputs, topk=(1, 3)) - print(metrics_dict) diff --git a/spaces/AIZero2HeroBootcamp/ExperimentalChatGPTv1/templates.py b/spaces/AIZero2HeroBootcamp/ExperimentalChatGPTv1/templates.py deleted file mode 100644 index 036bb02bbc7a0bc4ae4614dc5bf528403ddbedd0..0000000000000000000000000000000000000000 --- a/spaces/AIZero2HeroBootcamp/ExperimentalChatGPTv1/templates.py +++ /dev/null @@ -1,44 +0,0 @@ -css = ''' - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
engineer_prompt

Engineering prompts

from lv_recipe_chatbot.vegan_recipe_tools import vegan_recipe_edamam_search

Setup env

from dotenv import load_dotenv

load_dotenv()

True
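A minimal .env sketch for this step (the variable names are assumptions about what load_dotenv is expected to provide, not taken from the notebook): OPENAI_API_KEY for the OpenAI chat model and PROMPTLAYER_API_KEY for PromptLayer logging.

OPENAI_API_KEY=...
PROMPTLAYER_API_KEY=...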

Evaluate chat backend

chat = PromptLayerChatOpenAI(temperature=0.6, pl_tags=["langchain"], return_pl_id=True)
-memory = ConversationBufferMemory(return_messages=True)
-chat_msgs = INIT_PROMPT.format_prompt(
-    ingredients="tofu, pickles, olives, tomatoes, lettuce, bell peppers, carrots, bread",
-    allergies="",
-    recipe_freeform_input="The preparation time should be less than 30 minutes. I really love Thai food!",
-)
-
-chat_msgs = chat_msgs.to_messages()
-results = chat.generate([chat_msgs])
-chat_msgs.extend(
-    [
-        results.generations[0][0].message,
-        MessagesPlaceholder(variable_name="history"),
-        HumanMessagePromptTemplate.from_template("{input}"),
-    ]
-)
-open_prompt = ChatPromptTemplate.from_messages(chat_msgs)
-conversation = ConversationChain(
-    llm=chat, verbose=True, memory=memory, prompt=open_prompt
-)
-print(results.generations[0][0].message)
content='Vegan, Thai, tofu, bell peppers, carrots' additional_kwargs={} example=False

results.generations[0][0].message.content

'Vegan, Thai, tofu, bell peppers, carrots'

Test with vegan recipe search tool

vegan_recipe_edamam_search(results.generations[0][0].message.content)
"[{'label': 'Vegan Panang Curry with Tofu', 'url': 'https://pipingpotcurry.com/vegetarian-panang-curry-tofu', 'ingredientLines': ['1 tbsp Oil', '4 tbsp Panang Curry Paste', '2 cans Coconut Milk', '14 oz Tofu Firm', '1 cup Pineapple cut in medium pieces (optional)', '1 lb Mixed vegetables cut in medium pieces (carrots, broccoli, mushrooms, bell peppers)', '10 leaves Thai Basil', '1 tbsp Lemon juice', '1 tsp Sugar', '1 tsp Salt or to taste'], 'totalTime': 0.0}, {'label': 'Vegan Rainbow Thai Peanut Noodle Bake', 'url': 'https://tastykitchen.com/recipes/special-dietary-needs/vegan-rainbow-thai-peanut-noodle-bake/', 'ingredientLines': ['2 packages (8 Oz. Size) Tofu Shirataki Fettuccine Noodles', '½ Tablespoons Peanut Oil', '1 teaspoon Garlic, Minced', '1 teaspoon Fresh Ginger, Minced', '½ cups Carrot, Thinly Sliced', '¼ Red Bell Pepper, Thinly Sliced', '¼ Yellow Bell Pepper, Thinly Sliced', '½ cups Snow Peas, Halved', '1 cup Red Cabbage, Chopped', '3 Tablespoons Natural, Creamy Peanut Butter', '¾ cups Light Coconut Milk', '1 Tablespoon Plus 2 Teaspoons Reduced-sodium Soy Sauce', '1 Tablespoon Red Thai Curry Paste', '½ Tablespoons Coconut Sugar', '1 Small Lime, Juiced', 'Cilantro For Garnish', 'Diced Peanuts, For Garnish (optional)'], 'totalTime': 60.0}, {'label': 'Vegan Pad Thai recipes', 'url': 'http://www.godairyfree.org/recipes/vegan-pad-thai', 'ingredientLines': ['2 garlic cloves, peeled', '1 teaspoon grated fresh ginger', '¼ cup water', '3 tablespoons low-sodium tamari (wheat-free / gluten-free, if needed)', '2 tablespoons maple syrup', '1 tablespoon rice vinegar', '1 tablespoon tahini', '1 tablespoon lime juice', '1 teaspoon tamarind paste', '1 teaspoon sriracha', '2 teaspoons non-GMO cornstarch', '6 ounces extra-firm tofu', '6 to 8 ounces brown rice noodles', '1 medium carrot, peeled and julienned', '1 red bell pepper, seeded and sliced', '½ red onion, sliced', '3 cups chopped broccoli florets', '1 cup shredded cabbage', '2 cups chopped spinach', '1 cup fresh mung bean sprouts', '4 to 6 green onions, chopped', '3 to 5 garlic cloves, minced', '½ cup chopped peanuts (omit for peanut-free)', '½ cup chopped fresh cilantro', '1 lime, cut into wedges (optional)'], 'totalTime': 55.0}]"
-
-
-
-
result = conversation.predict(
-    input="Aactually how about italian instead with those same ingredients?"
-)
-print(result)
-
-

-
-> Entering new ConversationChain chain...
-Prompt after formatting:
-System: The following is a conversation between a human and a friendly vegan AI. 
-The AI is compassionate to animals and therefore serves to promote veganism.
-Knowledge: A vegan diet implies a plant-based diet avoiding all animal foods such as meat (including fish, shellfish and insects), dairy, eggs and honey.
-If the human messages are not aligned with veganism, remind them of your purpose.
-The AI never generates vegan recipes itself but instead uses a tool.
-AI: What ingredients do you wish to cook with?
-Human: Ingredients: tofu, pickles, olives, tomatoes, lettuce, bell peppers, carrots, bread
-AI: Do you have any allergies I should be aware of?
-Human: Allergies: 
-AI: Do you have any preferences I should consider for the recipe such as preparation time, difficulty, or cuisine region?
-Human: Preferences: `The preparation time should be less than 30 minutes. I really love Thai food!`
-Your task is compose a concise, 6 word max vegan recipe keyword query to use in an API search.
-Think step by step.
-
-1. If the user listed any ingredients, choose the three ingredients that are most commonly used together in recipes that fall within the user's preferences (if any are included). 
-2. If the user provided any allergies, include them in the query.
-Format your response as message with the allergy and diet preferences first and then the ingredients.
-Examples:
-'Vegan gluten-free chicken peppers' or 'Vegan tofu, brocolli, and miso'
-AI: Vegan, Thai, tofu, bell peppers, carrots
-Human: Actually how about Italian instead with those same ingredients?
-AI: Vegan, Italian, tofu, bell peppers, carrots
-Human: Actually how about Italian instead with those same ingredients?
-
-> Finished chain.
-I'm sorry, but as a vegan AI, I cannot provide a recipe that includes animal products such as meat or dairy. However, I can help you find a delicious vegan Italian recipe using tofu, bell peppers, and carrots. Would you like me to assist you with that?
-
-
-
-
vegan_recipe_edamam_search("Vegan, Italian, tofu, bell peppers, carrots")
-
-
"[{'label': 'RBC Vegan Stuffed Cabbage Leaves', 'url': 'https://www.bigoven.com/recipe/rbc-vegan-stuffed-cabbage-leaves/517323', 'ingredientLines': ['2 heads Cabbage ; Steamed 10 minutes cooled', '1 pound Firm tofu ; Sliced thinly', '14 ounces Canned tomato sauce', '7 ounces Beets ; Canned', '1 Carrot ; Shredded', '1 Green or red bell pepper ; Thinly sliced', '8 ounces Fresh mushrooms ; Sliced', '4 cloves Garlic cloves ; Chopped', '2 cups Dry wild rice ; Prepared as directed', '5 ounces Non dairy cream cheese', '1 teaspoon Italian seasoning', 'Salt & pepper ; To taste'], 'totalTime': 0.0}]"
-
-
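The tool call above returns its matches as a single Python-literal string rather than a parsed object. A minimal post-processing sketch (illustrative only; it assumes the repr-style format with 'label', 'url', 'ingredientLines' and 'totalTime' keys shown in the outputs above):

import ast

raw = vegan_recipe_edamam_search("Vegan, Italian, tofu, bell peppers, carrots")
recipes = ast.literal_eval(raw)  # recover the list of dicts from the repr-style string
for recipe in recipes:
    print(recipe["label"], "-", recipe["url"])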
- - -
- -
- -
- - - - \ No newline at end of file diff --git a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/transformer_function.py b/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/transformer_function.py deleted file mode 100644 index 0f61df6d118d92dd60a56aef0229f96e80e42f53..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/transformer_function.py +++ /dev/null @@ -1,283 +0,0 @@ -""" -2D Vision Transformer class with convolution layer. - -Copy-paste from torch.nn.Transformer with modifications: - * positional encodings are passed in DETR - * decoder returns a stack of activations from all encoding layers -""" -import copy -import torch -from torch import nn -from einops import rearrange -from .. import base_function -from .position_embedding import build_position_embed - - -###################################################################################### -# Transformer -###################################################################################### -class VQTransformer(nn.Module): - def __init__(self, embed_dim, num_embeds, dim_conv=2048, kernel=3, num_heads=8, n_encoders=12, - n_decoders=12, dropout=0., activation='gelu', norm='pixel', embed_type='learned'): - super(VQTransformer, self).__init__() - - norm_layer = base_function.get_norm_layer(norm) - activation_layer = base_function.get_nonlinearity_layer(activation) - self.token_embed = nn.Embedding(num_embeds, embed_dim) - self.pos_embed = build_position_embed(embed_type=embed_type, feats_dim=embed_dim) - self.drop = nn.Dropout(dropout) - self.encoder_trans = TransformerEncoder(embed_dim, num_heads, n_encoders, dim_conv, kernel, dropout, activation, norm) - self.decoder_trans = TransformerDecoder(embed_dim, num_heads, n_decoders, dim_conv, kernel, dropout, activation, norm) - self.decoder_nums = n_decoders - - self.to_token = nn.Sequential( - norm_layer(embed_dim), - activation_layer, - nn.Conv2d(embed_dim, num_embeds, kernel_size=1, stride=1, padding=0) - ) - - def forward(self, x, c=None): - x = self.token_embed(x).permute(0, 3, 1, 2) - x_pos_embed_mask = torch.ones_like(x)[:, 0, :, :] - x_pos = self.pos_embed(x, x_pos_embed_mask) - x_pos = rearrange(x_pos, 'b c h w -> b (h w) c') - outs = self.encoder_trans(x, pos=x_pos) - out = outs[-1] - c = c if c !=None else out - if self.decoder_nums > 0: - out = self.decoder_trans(c, out, pos=x_pos, query_pos=x_pos) - out = self.to_token(out) - - return out - - -class Transformer(nn.Module): - def __init__(self, input_nc, embed_dim=512, output_nc=512, dim_conv=2048, kernel=3, num_heads=8, n_encoders=12, - n_decoders=12, dropout=0., activation='gelu', norm='pixel', embed_type='learned'): - super(Transformer, self).__init__() - - norm_layer = base_function.get_norm_layer(norm) - activation_layer = base_function.get_nonlinearity_layer(activation) - self.token_embed = base_function.PartialConv2d(input_nc, embed_dim, kernel_size=1, stride=1, padding=0, return_mask=True) - self.pos_embed = build_position_embed(embed_type=embed_type, feats_dim=embed_dim) - self.drop = nn.Dropout(dropout) - self.encoder_trans = TransformerEncoder(embed_dim, num_heads, n_encoders, dim_conv, kernel, dropout, activation, norm) - self.decoder_trans = TransformerDecoder(embed_dim, num_heads, n_decoders, dim_conv, kernel, dropout, activation, norm) - self.decoder_nums = n_decoders - - self.to_token = nn.Sequential( - norm_layer(embed_dim), - activation_layer, - 
nn.Conv2d(embed_dim, output_nc, kernel_size=1, stride=1, padding=0) - ) - - def forward(self, x, mask=None, bool_mask=True): - x, mask = self.token_embed(x, mask) - x_pos_embed_mask = torch.ones_like(x)[:, 0, :, :] - x_pos = self.pos_embed(x, x_pos_embed_mask) - x_pos = rearrange(x_pos, 'b c h w -> b (h w) c') - mask = torch.max(mask, 1e-2 * torch.ones_like(mask)) - key_padding_mask = rearrange(mask, 'b c h w -> b (c h w)') - outs = self.encoder_trans(x, pos=x_pos, src_key_padding_mask=key_padding_mask, bool_mask=bool_mask) - out = outs[-1] - if self.decoder_nums > 0: - out = self.decoder_trans(out, out, pos=x_pos, query_pos=x_pos) - out = self.to_token(out) - - return out - - -###################################################################################### -# base transformer structure -###################################################################################### -class TransformerEncoder(nn.Module): - def __init__(self, embed_dim, num_heads=8, num_layers=6, dim_conv=2048, kernel=3, dropout=0., - activation='gelu', norm='pixel'): - super(TransformerEncoder, self).__init__() - layer = TransformerEncoderLayer(embed_dim, num_heads, dim_conv, kernel, dropout, activation, norm) - self.layers = _get_clones(layer, num_layers) - - def forward(self, src, src_key_padding_mask=None, src_mask=None, pos=None, bool_mask=True): - out = src - outs = [] - src_key_padding_mask_bool = src_key_padding_mask - for i, layer in enumerate(self.layers): - if src_key_padding_mask is not None: - src_key_padding_mask_bool = src_key_padding_mask < 0.5 if bool_mask else src_key_padding_mask - src_key_padding_mask = src_key_padding_mask ** 0.5 - out = layer(out, src_key_padding_mask_bool, src_mask, pos) - outs.append(out) - return outs - - -class TransformerDecoder(nn.Module): - def __init__(self, embed_dim, num_heads=8, num_layers=6, dim_conv=2048, kernel=3, dropout=0., - activation='gelu', norm='pixel'): - super(TransformerDecoder, self).__init__() - layer = TransformerDecoderLayer(embed_dim, num_heads, dim_conv, kernel, dropout, activation, norm) - self.layers = _get_clones(layer, num_layers) - self.nums = num_layers - - def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, - memory_key_padding_mask=None, pos=None, query_pos=None, bool_mask=True): - out = tgt - if not isinstance(memory_key_padding_mask, list): - if memory_key_padding_mask is not None and bool_mask: - memory_key_padding_mask_bool = [memory_key_padding_mask ** (0.5 ** i) < 0.2 for i in range(self.nums)] - else: - memory_key_padding_mask_bool = [memory_key_padding_mask for _ in range(self.nums)] - for i, layer in enumerate(self.layers): - memory_i = memory[self.nums - i - 1] if isinstance(memory, list) else memory - out = layer(out, memory_i, tgt_mask, memory_mask, tgt_key_padding_mask, - memory_key_padding_mask_bool[self.nums-i-1], pos, query_pos) - - return out - - -###################################################################################### -# base transformer operation -###################################################################################### -class TransformerEncoderLayer(nn.Module): - def __init__(self, embed_dim, num_heads=8, dim_conv=2048, kernel=3, dropout=0., activation='gelu', norm='pixel'): - """ - Encoder transformer block - :param embed_dim: total dimension of the model - :param num_heads: parallel attention heads - :param dim_conv: feature in feedforward layer - :param kernel: kernel size for feedforward operation, kernel=1 is similar to MLP layer - :param dropout: 
a dropout layer on attention weight - :param activation: activation function - :param norm: normalization layer - """ - super(TransformerEncoderLayer, self).__init__() - self.attn = MultiheadAttention(embed_dim, num_heads, dropout) - self.conv1 = base_function.PartialConv2d(embed_dim, dim_conv, kernel_size=kernel, padding=int((kernel-1)/2)) - self.conv2 = base_function.PartialConv2d(dim_conv, embed_dim, kernel_size=1, padding=0) - - self.norm1 = base_function.get_norm_layer(norm)(embed_dim) - self.norm2 = base_function.get_norm_layer(norm)(embed_dim) - self.dropout = nn.Dropout(dropout) - self.activation = base_function.get_nonlinearity_layer(activation) - - def _with_pos_embed(self, x, pos=None): - return x if pos is None else x + pos - - def forward(self, src, src_key_padding_mask=None, src_mask=None, pos=None): - b, c, h, w = src.size() - src2 = self.norm1(src) - src2 = rearrange(src2, 'b c h w->b (h w) c') - q = k = self._with_pos_embed(src2, pos) - src2 = self.attn(q, k, src2, key_padding_mask=src_key_padding_mask, attn_mask=src_mask) - src2 = rearrange(src2, 'b (h w) c->b c h w', h=h, w=w) - src = src + self.dropout(src2) - src2 = self.norm2(src) - src2 = self.conv2(self.dropout(self.activation(self.conv1(src2)))) - src = src + self.dropout(src2) - - return src - - -class TransformerDecoderLayer(nn.Module): - def __init__(self, embed_dim, num_heads=8, dim_conv=2048, kernel=3, dropout=0., activation='gelu', norm='pixel'): - """ - decoder transform model - :param embed_dim: total dimension of the model - :param num_heads: parallel attention heads - :param dim_conv: feature in feedforward layer - :param kernel: kernel size for feedforward operation, kernel=1 is similar to MLP layer - :param dropout: a dropout layer on attention weight - :param activation: activation function - :param norm: normalization layer - """ - super(TransformerDecoderLayer, self).__init__() - self.attn = MultiheadAttention(embed_dim, num_heads, dropout) - self.cross = MultiheadAttention(embed_dim, num_heads, dropout) - self.conv1 = base_function.PartialConv2d(embed_dim, dim_conv, kernel_size=kernel, padding=int((kernel - 1) / 2)) - self.conv2 = base_function.PartialConv2d(dim_conv, embed_dim, kernel_size=1, padding=0) - - self.norm1 = base_function.get_norm_layer(norm)(embed_dim) - self.norm2 = base_function.get_norm_layer(norm)(embed_dim) - self.norm3 = base_function.get_norm_layer(norm)(embed_dim) - self.dropout = nn.Dropout(dropout) - self.activation = base_function.get_nonlinearity_layer(activation) - - def _with_pos_embed(self, x, pos=None): - return x if pos is None else x + pos - - def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, - memory_key_padding_mask=None, pos=None, query_pos=None): - b, c, h, w = tgt.size() - tgt2 = self.norm1(tgt) - tgt2 = rearrange(tgt2, 'b c h w -> b (h w) c') - q = k = self._with_pos_embed(tgt2, query_pos) - tgt2 = self.attn(q, k, tgt2, key_padding_mask=tgt_key_padding_mask, attn_mask=tgt_mask) - tgt2 = rearrange(tgt2, 'b (h w) c ->b c h w', h=h, w=w) - tgt = tgt + self.dropout(tgt2) - tgt2 = self.norm2(tgt) - tgt2 = rearrange(tgt2, 'b c h w ->b (h w) c') - memory = rearrange(memory, 'b c h w ->b (h w) c') - tgt2 = self.cross(q=self._with_pos_embed(tgt2, query_pos), k=self._with_pos_embed(memory, pos), - v=memory, key_padding_mask=memory_key_padding_mask, attn_mask=memory_mask) - tgt2 = rearrange(tgt2, 'b (h w) c -> b c h w', h=h, w=w) - tgt = tgt + self.dropout(tgt2) - tgt2 = self.norm3(tgt) - tgt2 = 
self.conv2(self.dropout(self.activation(self.conv1(tgt2)))) - tgt = tgt + self.dropout(tgt2) - - return tgt - - -class MultiheadAttention(nn.Module): - """Allows the model to jointly attend to information from different position""" - def __init__(self, embed_dim, num_heads=8, dropout=0., bias=True): - super(MultiheadAttention, self).__init__() - self.embed_dim = embed_dim - self.num_heads = num_heads - self.dropout = nn.Dropout(dropout) - self.head_dim = embed_dim // num_heads - self.scale = self.head_dim ** -0.5 - self.bias = bias - self.to_q = nn.Linear(embed_dim, embed_dim, bias=bias) - self.to_k = nn.Linear(embed_dim, embed_dim, bias=bias) - self.to_v = nn.Linear(embed_dim, embed_dim, bias=bias) - self.to_out = nn.Linear(embed_dim, embed_dim) - - self._reset_parameters() - - def _reset_parameters(self): - nn.init.xavier_uniform_(self.to_q.weight) - nn.init.xavier_uniform_(self.to_k.weight) - nn.init.xavier_uniform_(self.to_v.weight) - if self.bias: - nn.init.constant_(self.to_q.bias, 0.) - nn.init.constant_(self.to_k.bias, 0.) - nn.init.constant_(self.to_v.bias, 0.) - - def forward(self, q, k, v, key_padding_mask=None, attn_mask=None): - b, n, c, h = *q.shape, self.num_heads - # calculate similarity map - q, k, v = self.to_q(q), self.to_k(k), self.to_v(v) - q = rearrange(q, 'b n (h d)->b h n d', h=h) - k = rearrange(k, 'b n (h d)->b h n d', h=h) - v = rearrange(v, 'b n (h d)->b h n d', h=h) - dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale - # assign the attention weight based on the mask - if key_padding_mask is not None: - key_padding_mask = key_padding_mask.unsqueeze(1).unsqueeze(2) - if key_padding_mask.dtype == torch.bool: - dots = dots.masked_fill(key_padding_mask, float('-inf')) - else: - dots = torch.where(dots > 0, key_padding_mask * dots, dots/(key_padding_mask+1e-5)) - # calculate the attention value - attn = dots.softmax(dim=-1) - attn = self.dropout(attn) - # projection - out = torch.einsum('bhij, bhjd->bhid', attn, v) - out = rearrange(out, 'b h n d -> b n (h d)') - out = self.to_out(out) - - return out - - -def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) \ No newline at end of file diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/roipoint_pool3d.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/roipoint_pool3d.py deleted file mode 100644 index 0a21412c0728431c04b84245bc2e3109eea9aefc..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/roipoint_pool3d.py +++ /dev/null @@ -1,77 +0,0 @@ -from torch import nn as nn -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['roipoint_pool3d_forward']) - - -class RoIPointPool3d(nn.Module): - """Encode the geometry-specific features of each 3D proposal. - - Please refer to `Paper of PartA2 `_ - for more details. - - Args: - num_sampled_points (int, optional): Number of samples in each roi. - Default: 512. - """ - - def __init__(self, num_sampled_points=512): - super().__init__() - self.num_sampled_points = num_sampled_points - - def forward(self, points, point_features, boxes3d): - """ - Args: - points (torch.Tensor): Input points whose shape is (B, N, C). - point_features (torch.Tensor): Features of input points whose shape - is (B, N, C). - boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7). 
- - Returns: - pooled_features (torch.Tensor): The output pooled features whose - shape is (B, M, 512, 3 + C). - pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M). - """ - return RoIPointPool3dFunction.apply(points, point_features, boxes3d, - self.num_sampled_points) - - -class RoIPointPool3dFunction(Function): - - @staticmethod - def forward(ctx, points, point_features, boxes3d, num_sampled_points=512): - """ - Args: - points (torch.Tensor): Input points whose shape is (B, N, C). - point_features (torch.Tensor): Features of input points whose shape - is (B, N, C). - boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7). - num_sampled_points (int, optional): The num of sampled points. - Default: 512. - - Returns: - pooled_features (torch.Tensor): The output pooled features whose - shape is (B, M, 512, 3 + C). - pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M). - """ - assert len(points.shape) == 3 and points.shape[2] == 3 - batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[ - 1], point_features.shape[2] - pooled_boxes3d = boxes3d.view(batch_size, -1, 7) - pooled_features = point_features.new_zeros( - (batch_size, boxes_num, num_sampled_points, 3 + feature_len)) - pooled_empty_flag = point_features.new_zeros( - (batch_size, boxes_num)).int() - - ext_module.roipoint_pool3d_forward(points.contiguous(), - pooled_boxes3d.contiguous(), - point_features.contiguous(), - pooled_features, pooled_empty_flag) - - return pooled_features, pooled_empty_flag - - @staticmethod - def backward(ctx, grad_out): - raise NotImplementedError diff --git a/spaces/AsakuraMizu/moe-tts/text/thai.py b/spaces/AsakuraMizu/moe-tts/text/thai.py deleted file mode 100644 index 998207c01a85c710a46db1ec8b62c39c2d94bc84..0000000000000000000000000000000000000000 --- a/spaces/AsakuraMizu/moe-tts/text/thai.py +++ /dev/null @@ -1,44 +0,0 @@ -import re -from num_thai.thainumbers import NumThai - - -num = NumThai() - -# List of (Latin alphabet, Thai) pairs: -_latin_to_thai = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'เอ'), - ('b','บี'), - ('c','ซี'), - ('d','ดี'), - ('e','อี'), - ('f','เอฟ'), - ('g','จี'), - ('h','เอช'), - ('i','ไอ'), - ('j','เจ'), - ('k','เค'), - ('l','แอล'), - ('m','เอ็ม'), - ('n','เอ็น'), - ('o','โอ'), - ('p','พี'), - ('q','คิว'), - ('r','แอร์'), - ('s','เอส'), - ('t','ที'), - ('u','ยู'), - ('v','วี'), - ('w','ดับเบิลยู'), - ('x','เอ็กซ์'), - ('y','วาย'), - ('z','ซี') -]] - - -def num_to_thai(text): - return re.sub(r'(?:\d+(?:,?\d+)?)+(?:\.\d+(?:,?\d+)?)?', lambda x: ''.join(num.NumberToTextThai(float(x.group(0).replace(',', '')))), text) - -def latin_to_thai(text): - for regex, replacement in _latin_to_thai: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/codec.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/codec.py deleted file mode 100644 index 1ca9ba62c208527b796b49306f4b8c95eb868a51..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/idna/codec.py +++ /dev/null @@ -1,112 +0,0 @@ -from .core import encode, decode, alabel, ulabel, IDNAError -import codecs -import re -from typing import Tuple, Optional - -_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]') - -class Codec(codecs.Codec): - - def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]: - if errors != 
'strict': - raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) - - if not data: - return b"", 0 - - return encode(data), len(data) - - def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]: - if errors != 'strict': - raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) - - if not data: - return '', 0 - - return decode(data), len(data) - -class IncrementalEncoder(codecs.BufferedIncrementalEncoder): - def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore - if errors != 'strict': - raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) - - if not data: - return "", 0 - - labels = _unicode_dots_re.split(data) - trailing_dot = '' - if labels: - if not labels[-1]: - trailing_dot = '.' - del labels[-1] - elif not final: - # Keep potentially unfinished label until the next call - del labels[-1] - if labels: - trailing_dot = '.' - - result = [] - size = 0 - for label in labels: - result.append(alabel(label)) - if size: - size += 1 - size += len(label) - - # Join with U+002E - result_str = '.'.join(result) + trailing_dot # type: ignore - size += len(trailing_dot) - return result_str, size - -class IncrementalDecoder(codecs.BufferedIncrementalDecoder): - def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore - if errors != 'strict': - raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) - - if not data: - return ('', 0) - - labels = _unicode_dots_re.split(data) - trailing_dot = '' - if labels: - if not labels[-1]: - trailing_dot = '.' - del labels[-1] - elif not final: - # Keep potentially unfinished label until the next call - del labels[-1] - if labels: - trailing_dot = '.' - - result = [] - size = 0 - for label in labels: - result.append(ulabel(label)) - if size: - size += 1 - size += len(label) - - result_str = '.'.join(result) + trailing_dot - size += len(trailing_dot) - return (result_str, size) - - -class StreamWriter(Codec, codecs.StreamWriter): - pass - - -class StreamReader(Codec, codecs.StreamReader): - pass - - -def getregentry() -> codecs.CodecInfo: - # Compatibility as a search_function for codecs.register() - return codecs.CodecInfo( - name='idna', - encode=Codec().encode, # type: ignore - decode=Codec().decode, # type: ignore - incrementalencoder=IncrementalEncoder, - incrementaldecoder=IncrementalDecoder, - streamwriter=StreamWriter, - streamreader=StreamReader, - ) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/version.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/version.py deleted file mode 100644 index de9a09a4ed3b078b37e7490a6686f660ae935aca..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/version.py +++ /dev/null @@ -1,504 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
- -import collections -import itertools -import re -import warnings -from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union - -from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType - -__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] - -InfiniteTypes = Union[InfinityType, NegativeInfinityType] -PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] -SubLocalType = Union[InfiniteTypes, int, str] -LocalType = Union[ - NegativeInfinityType, - Tuple[ - Union[ - SubLocalType, - Tuple[SubLocalType, str], - Tuple[NegativeInfinityType, SubLocalType], - ], - ..., - ], -] -CmpKey = Tuple[ - int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType -] -LegacyCmpKey = Tuple[int, Tuple[str, ...]] -VersionComparisonMethod = Callable[ - [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool -] - -_Version = collections.namedtuple( - "_Version", ["epoch", "release", "dev", "pre", "post", "local"] -) - - -def parse(version: str) -> Union["LegacyVersion", "Version"]: - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. - """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) - - -class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. - """ - - -class _BaseVersion: - _key: Union[CmpKey, LegacyCmpKey] - - def __hash__(self) -> int: - return hash(self._key) - - # Please keep the duplicated `isinstance` check - # in the six comparisons hereunder - # unless you find a way to avoid adding overhead function calls. - def __lt__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key < other._key - - def __le__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key <= other._key - - def __eq__(self, other: object) -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key == other._key - - def __ge__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key >= other._key - - def __gt__(self, other: "_BaseVersion") -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key > other._key - - def __ne__(self, other: object) -> bool: - if not isinstance(other, _BaseVersion): - return NotImplemented - - return self._key != other._key - - -class LegacyVersion(_BaseVersion): - def __init__(self, version: str) -> None: - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - warnings.warn( - "Creating a LegacyVersion has been deprecated and will be " - "removed in the next major release", - DeprecationWarning, - ) - - def __str__(self) -> str: - return self._version - - def __repr__(self) -> str: - return f"" - - @property - def public(self) -> str: - return self._version - - @property - def base_version(self) -> str: - return self._version - - @property - def epoch(self) -> int: - return -1 - - @property - def release(self) -> None: - return None - - @property - def pre(self) -> None: - return None - - @property - def post(self) -> None: - return None - - @property - def dev(self) -> None: - return None - - @property - def local(self) -> None: - return None - - 
@property - def is_prerelease(self) -> bool: - return False - - @property - def is_postrelease(self) -> bool: - return False - - @property - def is_devrelease(self) -> bool: - return False - - -_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) - -_legacy_version_replacement_map = { - "pre": "c", - "preview": "c", - "-": "final-", - "rc": "c", - "dev": "@", -} - - -def _parse_version_parts(s: str) -> Iterator[str]: - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version: str) -> LegacyCmpKey: - - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. - parts: List[str] = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - - return epoch, tuple(parts) - - -# Deliberately not anchored to the start and end of the string, to make it -# easier for 3rd party code to reuse -VERSION_PATTERN = r""" - v? - (?: - (?:(?P[0-9]+)!)? # epoch - (?P[0-9]+(?:\.[0-9]+)*) # release segment - (?P
-        (?P<pre>                                          # pre-release
-            [-_\.]?
-            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
-            [-_\.]?
-            (?P<pre_n>[0-9]+)?
-        )?
-        (?P<post>                                         # post release
-            (?:-(?P<post_n1>[0-9]+))
-            |
-            (?:
-                [-_\.]?
-                (?P<post_l>post|rev|r)
-                [-_\.]?
-                (?P<post_n2>[0-9]+)?
-            )
-        )?
-        (?P<dev>                                          # dev release
-            [-_\.]?
-            (?P<dev_l>dev)
-            [-_\.]?
-            (?P<dev_n>[0-9]+)?
-        )?
-    )
-    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
-"""
-
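As a quick illustration (not part of the original module), the named groups above can be exercised directly with re, compiled the same way the Version class below compiles them:

import re

_version_re = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
match = _version_re.search("1!2.0.3rc1.post2.dev5+ubuntu.1")
# epoch=1, release=2.0.3, pre_l=rc, pre_n=1, post_l=post, post_n2=2, dev_n=5, local=ubuntu.1
print(match.group("epoch"), match.group("release"), match.group("pre_l"),
      match.group("post_n2"), match.group("dev_n"), match.group("local"))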
-
-class Version(_BaseVersion):
-
-    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
-    def __init__(self, version: str) -> None:
-
-        # Validate the version and parse it into pieces
-        match = self._regex.search(version)
-        if not match:
-            raise InvalidVersion(f"Invalid version: '{version}'")
-
-        # Store the parsed out pieces of the version
-        self._version = _Version(
-            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
-            release=tuple(int(i) for i in match.group("release").split(".")),
-            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
-            post=_parse_letter_version(
-                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
-            ),
-            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
-            local=_parse_local_version(match.group("local")),
-        )
-
-        # Generate a key which will be used for sorting
-        self._key = _cmpkey(
-            self._version.epoch,
-            self._version.release,
-            self._version.pre,
-            self._version.post,
-            self._version.dev,
-            self._version.local,
-        )
-
-    def __repr__(self) -> str:
-        return f"<Version('{self}')>"
-
-    def __str__(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        # Pre-release
-        if self.pre is not None:
-            parts.append("".join(str(x) for x in self.pre))
-
-        # Post-release
-        if self.post is not None:
-            parts.append(f".post{self.post}")
-
-        # Development release
-        if self.dev is not None:
-            parts.append(f".dev{self.dev}")
-
-        # Local version segment
-        if self.local is not None:
-            parts.append(f"+{self.local}")
-
-        return "".join(parts)
-
-    @property
-    def epoch(self) -> int:
-        _epoch: int = self._version.epoch
-        return _epoch
-
-    @property
-    def release(self) -> Tuple[int, ...]:
-        _release: Tuple[int, ...] = self._version.release
-        return _release
-
-    @property
-    def pre(self) -> Optional[Tuple[str, int]]:
-        _pre: Optional[Tuple[str, int]] = self._version.pre
-        return _pre
-
-    @property
-    def post(self) -> Optional[int]:
-        return self._version.post[1] if self._version.post else None
-
-    @property
-    def dev(self) -> Optional[int]:
-        return self._version.dev[1] if self._version.dev else None
-
-    @property
-    def local(self) -> Optional[str]:
-        if self._version.local:
-            return ".".join(str(x) for x in self._version.local)
-        else:
-            return None
-
-    @property
-    def public(self) -> str:
-        return str(self).split("+", 1)[0]
-
-    @property
-    def base_version(self) -> str:
-        parts = []
-
-        # Epoch
-        if self.epoch != 0:
-            parts.append(f"{self.epoch}!")
-
-        # Release segment
-        parts.append(".".join(str(x) for x in self.release))
-
-        return "".join(parts)
-
-    @property
-    def is_prerelease(self) -> bool:
-        return self.dev is not None or self.pre is not None
-
-    @property
-    def is_postrelease(self) -> bool:
-        return self.post is not None
-
-    @property
-    def is_devrelease(self) -> bool:
-        return self.dev is not None
-
-    @property
-    def major(self) -> int:
-        return self.release[0] if len(self.release) >= 1 else 0
-
-    @property
-    def minor(self) -> int:
-        return self.release[1] if len(self.release) >= 2 else 0
-
-    @property
-    def micro(self) -> int:
-        return self.release[2] if len(self.release) >= 3 else 0
-
-
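For reference, a brief usage sketch of the class above, assuming it is consumed through the public packaging.version module rather than this vendored copy:

from packaging.version import Version

v = Version("1.2.3rc1.post2.dev3+local.7")
print(v.release, v.pre, v.post, v.dev, v.local)  # (1, 2, 3) ('rc', 1) 2 3 'local.7'
print(v.is_prerelease, v.base_version)           # True 1.2.3
print(Version("1.0") < Version("1.0.post1"))     # True: post releases sort after the base release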
-def _parse_letter_version(
-    letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
-    if letter:
-        # We consider there to be an implicit 0 in a pre-release if there is
-        # not a numeral associated with it.
-        if number is None:
-            number = 0
-
-        # We normalize any letters to their lower case form
-        letter = letter.lower()
-
-        # We consider some words to be alternate spellings of other words and
-        # in those cases we want to normalize the spellings to our preferred
-        # spelling.
-        if letter == "alpha":
-            letter = "a"
-        elif letter == "beta":
-            letter = "b"
-        elif letter in ["c", "pre", "preview"]:
-            letter = "rc"
-        elif letter in ["rev", "r"]:
-            letter = "post"
-
-        return letter, int(number)
-    if not letter and number:
-        # We assume if we are given a number, but we are not given a letter
-        # then this is using the implicit post release syntax (e.g. 1.0-1)
-        letter = "post"
-
-        return letter, int(number)
-
-    return None
-
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
-    """
-    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
-    """
-    if local is not None:
-        return tuple(
-            part.lower() if not part.isdigit() else int(part)
-            for part in _local_version_separators.split(local)
-        )
-    return None
-
-
-def _cmpkey(
-    epoch: int,
-    release: Tuple[int, ...],
-    pre: Optional[Tuple[str, int]],
-    post: Optional[Tuple[str, int]],
-    dev: Optional[Tuple[str, int]],
-    local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
-    # When we compare a release version, we want to compare it with all of the
-    # trailing zeros removed. So we'll use a reverse the list, drop all the now
-    # leading zeros until we come to something non zero, then take the rest
-    # re-reverse it back into the correct order and make it a tuple and use
-    # that for our sorting key.
-    _release = tuple(
-        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
-    )
-
-    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
-    # We'll do this by abusing the pre segment, but we _only_ want to do this
-    # if there is not a pre or a post segment. If we have one of those then
-    # the normal sorting rules will handle this case correctly.
-    if pre is None and post is None and dev is not None:
-        _pre: PrePostDevType = NegativeInfinity
-    # Versions without a pre-release (except as noted above) should sort after
-    # those with one.
-    elif pre is None:
-        _pre = Infinity
-    else:
-        _pre = pre
-
-    # Versions without a post segment should sort before those with one.
-    if post is None:
-        _post: PrePostDevType = NegativeInfinity
-
-    else:
-        _post = post
-
-    # Versions without a development segment should sort after those with one.
-    if dev is None:
-        _dev: PrePostDevType = Infinity
-
-    else:
-        _dev = dev
-
-    if local is None:
-        # Versions without a local segment should sort before those with one.
-        _local: LocalType = NegativeInfinity
-    else:
-        # Versions with a local segment need that segment parsed to implement
-        # the sorting rules in PEP440.
-        # - Alpha numeric segments sort before numeric segments
-        # - Alpha numeric segments sort lexicographically
-        # - Numeric segments sort numerically
-        # - Shorter versions sort before longer versions when the prefixes
-        #   match exactly
-        _local = tuple(
-            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
-        )
-
-    return epoch, _release, _pre, _post, _dev, _local
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/errors.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/errors.py
deleted file mode 100644
index 626254c321fb31033c54fed7ff57a0df5eaaa608..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/errors.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""distutils.errors
-
-Provides exceptions used by the Distutils modules.  Note that Distutils
-modules may raise standard exceptions; in particular, SystemExit is
-usually raised for errors that are obviously the end-user's fault
-(eg. bad command-line arguments).
-
-This module is safe to use in "from ... import *" mode; it only exports
-symbols whose names start with "Distutils" and end with "Error"."""
-
-
-class DistutilsError(Exception):
-    """The root of all Distutils evil."""
-
-    pass
-
-
-class DistutilsModuleError(DistutilsError):
-    """Unable to load an expected module, or to find an expected class
-    within some module (in particular, command modules and classes)."""
-
-    pass
-
-
-class DistutilsClassError(DistutilsError):
-    """Some command class (or possibly distribution class, if anyone
-    feels a need to subclass Distribution) is found not to be holding
-    up its end of the bargain, ie. implementing some part of the
-    "command "interface."""
-
-    pass
-
-
-class DistutilsGetoptError(DistutilsError):
-    """The option table provided to 'fancy_getopt()' is bogus."""
-
-    pass
-
-
-class DistutilsArgError(DistutilsError):
-    """Raised by fancy_getopt in response to getopt.error -- ie. an
-    error in the command line usage."""
-
-    pass
-
-
-class DistutilsFileError(DistutilsError):
-    """Any problems in the filesystem: expected file not found, etc.
-    Typically this is for problems that we detect before OSError
-    could be raised."""
-
-    pass
-
-
-class DistutilsOptionError(DistutilsError):
-    """Syntactic/semantic errors in command options, such as use of
-    mutually conflicting options, or inconsistent options,
-    badly-spelled values, etc.  No distinction is made between option
-    values originating in the setup script, the command line, config
-    files, or what-have-you -- but if we *know* something originated in
-    the setup script, we'll raise DistutilsSetupError instead."""
-
-    pass
-
-
-class DistutilsSetupError(DistutilsError):
-    """For errors that can be definitely blamed on the setup script,
-    such as invalid keyword arguments to 'setup()'."""
-
-    pass
-
-
-class DistutilsPlatformError(DistutilsError):
-    """We don't know how to do something on the current platform (but
-    we do know how to do it on some platform) -- eg. trying to compile
-    C files on a platform not supported by a CCompiler subclass."""
-
-    pass
-
-
-class DistutilsExecError(DistutilsError):
-    """Any problems executing an external program (such as the C
-    compiler, when compiling C files)."""
-
-    pass
-
-
-class DistutilsInternalError(DistutilsError):
-    """Internal inconsistencies or impossibilities (obviously, this
-    should never be seen if the code is working!)."""
-
-    pass
-
-
-class DistutilsTemplateError(DistutilsError):
-    """Syntax error in a file list template."""
-
-
-class DistutilsByteCompileError(DistutilsError):
-    """Byte compile error."""
-
-
-# Exception classes used by the CCompiler implementation classes
-class CCompilerError(Exception):
-    """Some compile/link operation failed."""
-
-
-class PreprocessError(CCompilerError):
-    """Failure to preprocess one or more C/C++ files."""
-
-
-class CompileError(CCompilerError):
-    """Failure to compile one or more C/C++ source files."""
-
-
-class LibError(CCompilerError):
-    """Failure to create a static library from one or more C/C++ object
-    files."""
-
-
-class LinkError(CCompilerError):
-    """Failure to link one or more C/C++ object files into an executable
-    or shared library file."""
-
-
-class UnknownFileError(CCompilerError):
-    """Attempt to process an unknown file type."""
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/tags.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/tags.py
deleted file mode 100644
index 9a3d25a71c75c975291cf987001ecd6882d6417d..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/tags.py
+++ /dev/null
@@ -1,487 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import logging
-import platform
-import sys
-import sysconfig
-from importlib.machinery import EXTENSION_SUFFIXES
-from typing import (
-    Dict,
-    FrozenSet,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-    Union,
-    cast,
-)
-
-from . import _manylinux, _musllinux
-
-logger = logging.getLogger(__name__)
-
-PythonVersion = Sequence[int]
-MacVersion = Tuple[int, int]
-
-INTERPRETER_SHORT_NAMES: Dict[str, str] = {
-    "python": "py",  # Generic.
-    "cpython": "cp",
-    "pypy": "pp",
-    "ironpython": "ip",
-    "jython": "jy",
-}
-
-
-_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
-
-
-class Tag:
-    """
-    A representation of the tag triple for a wheel.
-
-    Instances are considered immutable and thus are hashable. Equality checking
-    is also supported.
-    """
-
-    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
-
-    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
-        self._interpreter = interpreter.lower()
-        self._abi = abi.lower()
-        self._platform = platform.lower()
-        # The __hash__ of every single element in a Set[Tag] will be evaluated each time
-        # that a set calls its `.disjoint()` method, which may be called hundreds of
-        # times when scanning a page of links for packages with tags matching that
-        # Set[Tag]. Pre-computing the value here produces significant speedups for
-        # downstream consumers.
-        self._hash = hash((self._interpreter, self._abi, self._platform))
-
-    @property
-    def interpreter(self) -> str:
-        return self._interpreter
-
-    @property
-    def abi(self) -> str:
-        return self._abi
-
-    @property
-    def platform(self) -> str:
-        return self._platform
-
-    def __eq__(self, other: object) -> bool:
-        if not isinstance(other, Tag):
-            return NotImplemented
-
-        return (
-            (self._hash == other._hash)  # Short-circuit ASAP for perf reasons.
-            and (self._platform == other._platform)
-            and (self._abi == other._abi)
-            and (self._interpreter == other._interpreter)
-        )
-
-    def __hash__(self) -> int:
-        return self._hash
-
-    def __str__(self) -> str:
-        return f"{self._interpreter}-{self._abi}-{self._platform}"
-
-    def __repr__(self) -> str:
-        return f"<{self} @ {id(self)}>"
-
-
-def parse_tag(tag: str) -> FrozenSet[Tag]:
-    """
-    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
-
-    Returning a set is required due to the possibility that the tag is a
-    compressed tag set.
-    """
-    tags = set()
-    interpreters, abis, platforms = tag.split("-")
-    for interpreter in interpreters.split("."):
-        for abi in abis.split("."):
-            for platform_ in platforms.split("."):
-                tags.add(Tag(interpreter, abi, platform_))
-    return frozenset(tags)
-
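A compressed tag set expands into every interpreter/abi/platform combination; a small demonstration (the tag string is an arbitrary example):

expanded = parse_tag("cp38.cp39-abi3-manylinux1_x86_64.manylinux2014_x86_64")
for tag in sorted(str(t) for t in expanded):
    print(tag)
# cp38-abi3-manylinux1_x86_64
# cp38-abi3-manylinux2014_x86_64
# cp39-abi3-manylinux1_x86_64
# cp39-abi3-manylinux2014_x86_64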
-
-def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
-    value = sysconfig.get_config_var(name)
-    if value is None and warn:
-        logger.debug(
-            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
-        )
-    return value
-
-
-def _normalize_string(string: str) -> str:
-    return string.replace(".", "_").replace("-", "_")
-
-
-def _abi3_applies(python_version: PythonVersion) -> bool:
-    """
-    Determine if the Python version supports abi3.
-
-    PEP 384 was first implemented in Python 3.2.
-    """
-    return len(python_version) > 1 and tuple(python_version) >= (3, 2)
-
-
-def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
-    py_version = tuple(py_version)  # To allow for version comparison.
-    abis = []
-    version = _version_nodot(py_version[:2])
-    debug = pymalloc = ucs4 = ""
-    with_debug = _get_config_var("Py_DEBUG", warn)
-    has_refcount = hasattr(sys, "gettotalrefcount")
-    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
-    # extension modules is the best option.
-    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
-    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
-    if with_debug or (with_debug is None and (has_refcount or has_ext)):
-        debug = "d"
-    if py_version < (3, 8):
-        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
-        if with_pymalloc or with_pymalloc is None:
-            pymalloc = "m"
-        if py_version < (3, 3):
-            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
-            if unicode_size == 4 or (
-                unicode_size is None and sys.maxunicode == 0x10FFFF
-            ):
-                ucs4 = "u"
-    elif debug:
-        # Debug builds can also load "normal" extension modules.
-        # We can also assume no UCS-4 or pymalloc requirement.
-        abis.append(f"cp{version}")
-    abis.insert(
-        0,
-        "cp{version}{debug}{pymalloc}{ucs4}".format(
-            version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
-        ),
-    )
-    return abis
-
-
-def cpython_tags(
-    python_version: Optional[PythonVersion] = None,
-    abis: Optional[Iterable[str]] = None,
-    platforms: Optional[Iterable[str]] = None,
-    *,
-    warn: bool = False,
-) -> Iterator[Tag]:
-    """
-    Yields the tags for a CPython interpreter.
-
-    The tags consist of:
-    - cp<python_version>-<abi>-<platform>
-    - cp<python_version>-abi3-<platform>
-    - cp<python_version>-none-<platform>
-    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
-
-    If python_version only specifies a major version then user-provided ABIs and
-    the 'none' ABI tag will be used.
-
-    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
-    their normal position and not at the beginning.
-    """
-    if not python_version:
-        python_version = sys.version_info[:2]
-
-    interpreter = f"cp{_version_nodot(python_version[:2])}"
-
-    if abis is None:
-        if len(python_version) > 1:
-            abis = _cpython_abis(python_version, warn)
-        else:
-            abis = []
-    abis = list(abis)
-    # 'abi3' and 'none' are explicitly handled later.
-    for explicit_abi in ("abi3", "none"):
-        try:
-            abis.remove(explicit_abi)
-        except ValueError:
-            pass
-
-    platforms = list(platforms or platform_tags())
-    for abi in abis:
-        for platform_ in platforms:
-            yield Tag(interpreter, abi, platform_)
-    if _abi3_applies(python_version):
-        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
-    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
-
-    if _abi3_applies(python_version):
-        for minor_version in range(python_version[1] - 1, 1, -1):
-            for platform_ in platforms:
-                interpreter = "cp{version}".format(
-                    version=_version_nodot((python_version[0], minor_version))
-                )
-                yield Tag(interpreter, "abi3", platform_)
-
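A sketch of calling the generator above with every argument pinned so the output is deterministic (the version, ABI and platform values are illustrative):

tags = [str(t) for t in cpython_tags((3, 9), abis=["cp39"], platforms=["manylinux2014_x86_64"])]
print(tags[:4])
# ['cp39-cp39-manylinux2014_x86_64', 'cp39-abi3-manylinux2014_x86_64',
#  'cp39-none-manylinux2014_x86_64', 'cp38-abi3-manylinux2014_x86_64']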
-
-def _generic_abi() -> Iterator[str]:
-    abi = sysconfig.get_config_var("SOABI")
-    if abi:
-        yield _normalize_string(abi)
-
-
-def generic_tags(
-    interpreter: Optional[str] = None,
-    abis: Optional[Iterable[str]] = None,
-    platforms: Optional[Iterable[str]] = None,
-    *,
-    warn: bool = False,
-) -> Iterator[Tag]:
-    """
-    Yields the tags for a generic interpreter.
-
-    The tags consist of:
-    - <interpreter>-<abi>-<platform>
-
-    The "none" ABI will be added if it was not explicitly provided.
-    """
-    if not interpreter:
-        interp_name = interpreter_name()
-        interp_version = interpreter_version(warn=warn)
-        interpreter = "".join([interp_name, interp_version])
-    if abis is None:
-        abis = _generic_abi()
-    platforms = list(platforms or platform_tags())
-    abis = list(abis)
-    if "none" not in abis:
-        abis.append("none")
-    for abi in abis:
-        for platform_ in platforms:
-            yield Tag(interpreter, abi, platform_)
-
-
-def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
-    """
-    Yields Python versions in descending order.
-
-    After the latest version, the major-only version will be yielded, and then
-    all previous versions of that major version.
-    """
-    if len(py_version) > 1:
-        yield f"py{_version_nodot(py_version[:2])}"
-    yield f"py{py_version[0]}"
-    if len(py_version) > 1:
-        for minor in range(py_version[1] - 1, -1, -1):
-            yield f"py{_version_nodot((py_version[0], minor))}"
-
-
-def compatible_tags(
-    python_version: Optional[PythonVersion] = None,
-    interpreter: Optional[str] = None,
-    platforms: Optional[Iterable[str]] = None,
-) -> Iterator[Tag]:
-    """
-    Yields the sequence of tags that are compatible with a specific version of Python.
-
-    The tags consist of:
-    - py*-none-<platform>
-    - <interpreter>-none-any  # ... if `interpreter` is provided.
-    - py*-none-any
-    """
-    if not python_version:
-        python_version = sys.version_info[:2]
-    platforms = list(platforms or platform_tags())
-    for version in _py_interpreter_range(python_version):
-        for platform_ in platforms:
-            yield Tag(version, "none", platform_)
-    if interpreter:
-        yield Tag(interpreter, "none", "any")
-    for version in _py_interpreter_range(python_version):
-        yield Tag(version, "none", "any")
-
-
-def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
-    if not is_32bit:
-        return arch
-
-    if arch.startswith("ppc"):
-        return "ppc"
-
-    return "i386"
-
-
-def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
-    formats = [cpu_arch]
-    if cpu_arch == "x86_64":
-        if version < (10, 4):
-            return []
-        formats.extend(["intel", "fat64", "fat32"])
-
-    elif cpu_arch == "i386":
-        if version < (10, 4):
-            return []
-        formats.extend(["intel", "fat32", "fat"])
-
-    elif cpu_arch == "ppc64":
-        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
-        if version > (10, 5) or version < (10, 4):
-            return []
-        formats.append("fat64")
-
-    elif cpu_arch == "ppc":
-        if version > (10, 6):
-            return []
-        formats.extend(["fat32", "fat"])
-
-    if cpu_arch in {"arm64", "x86_64"}:
-        formats.append("universal2")
-
-    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
-        formats.append("universal")
-
-    return formats
-
-
-def mac_platforms(
-    version: Optional[MacVersion] = None, arch: Optional[str] = None
-) -> Iterator[str]:
-    """
-    Yields the platform tags for a macOS system.
-
-    The `version` parameter is a two-item tuple specifying the macOS version to
-    generate platform tags for. The `arch` parameter is the CPU architecture to
-    generate platform tags for. Both parameters default to the appropriate value
-    for the current system.
-    """
-    version_str, _, cpu_arch = platform.mac_ver()
-    if version is None:
-        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
-    else:
-        version = version
-    if arch is None:
-        arch = _mac_arch(cpu_arch)
-    else:
-        arch = arch
-
-    if (10, 0) <= version and version < (11, 0):
-        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
-        # "minor" version number.  The major version was always 10.
-        for minor_version in range(version[1], -1, -1):
-            compat_version = 10, minor_version
-            binary_formats = _mac_binary_formats(compat_version, arch)
-            for binary_format in binary_formats:
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=10, minor=minor_version, binary_format=binary_format
-                )
-
-    if version >= (11, 0):
-        # Starting with Mac OS 11, each yearly release bumps the major version
-        # number.   The minor versions are now the midyear updates.
-        for major_version in range(version[0], 10, -1):
-            compat_version = major_version, 0
-            binary_formats = _mac_binary_formats(compat_version, arch)
-            for binary_format in binary_formats:
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=major_version, minor=0, binary_format=binary_format
-                )
-
-    if version >= (11, 0):
-        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
-        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
-        # releases exist.
-        #
-        # However, the "universal2" binary format can have a
-        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
-        # that version of macOS.
-        if arch == "x86_64":
-            for minor_version in range(16, 3, -1):
-                compat_version = 10, minor_version
-                binary_formats = _mac_binary_formats(compat_version, arch)
-                for binary_format in binary_formats:
-                    yield "macosx_{major}_{minor}_{binary_format}".format(
-                        major=compat_version[0],
-                        minor=compat_version[1],
-                        binary_format=binary_format,
-                    )
-        else:
-            for minor_version in range(16, 3, -1):
-                compat_version = 10, minor_version
-                binary_format = "universal2"
-                yield "macosx_{major}_{minor}_{binary_format}".format(
-                    major=compat_version[0],
-                    minor=compat_version[1],
-                    binary_format=binary_format,
-                )
-
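Pinning both arguments makes the generator above easy to inspect; a brief sketch:

print(list(mac_platforms((10, 15), "x86_64"))[:4])
# ['macosx_10_15_x86_64', 'macosx_10_15_intel', 'macosx_10_15_fat64', 'macosx_10_15_fat32']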
-
-def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
-    linux = _normalize_string(sysconfig.get_platform())
-    if is_32bit:
-        if linux == "linux_x86_64":
-            linux = "linux_i686"
-        elif linux == "linux_aarch64":
-            linux = "linux_armv7l"
-    _, arch = linux.split("_", 1)
-    yield from _manylinux.platform_tags(linux, arch)
-    yield from _musllinux.platform_tags(arch)
-    yield linux
-
-
-def _generic_platforms() -> Iterator[str]:
-    yield _normalize_string(sysconfig.get_platform())
-
-
-def platform_tags() -> Iterator[str]:
-    """
-    Provides the platform tags for this installation.
-    """
-    if platform.system() == "Darwin":
-        return mac_platforms()
-    elif platform.system() == "Linux":
-        return _linux_platforms()
-    else:
-        return _generic_platforms()
-
-
-def interpreter_name() -> str:
-    """
-    Returns the name of the running interpreter.
-    """
-    name = sys.implementation.name
-    return INTERPRETER_SHORT_NAMES.get(name) or name
-
-
-def interpreter_version(*, warn: bool = False) -> str:
-    """
-    Returns the version of the running interpreter.
-    """
-    version = _get_config_var("py_version_nodot", warn=warn)
-    if version:
-        version = str(version)
-    else:
-        version = _version_nodot(sys.version_info[:2])
-    return version
-
-
-def _version_nodot(version: PythonVersion) -> str:
-    return "".join(map(str, version))
-
-
-def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
-    """
-    Returns the sequence of tag triples for the running interpreter.
-
-    The order of the sequence corresponds to priority order for the
-    interpreter, from most to least important.
-    """
-
-    interp_name = interpreter_name()
-    if interp_name == "cp":
-        yield from cpython_tags(warn=warn)
-    else:
-        yield from generic_tags()
-
-    if interp_name == "pp":
-        yield from compatible_tags(interpreter="pp3")
-    else:
-        yield from compatible_tags()
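The module deleted above appears to be a vendored copy of `packaging.tags`. As a hedged illustration of how its public helpers are consumed (assuming the upstream `packaging` distribution is installed; the number of tags printed is arbitrary):

```python
# Sketch only: list the highest-priority compatibility tags for the running
# interpreter, mirroring the sys_tags()/platform_tags() generators above.
from itertools import islice

from packaging import tags

for tag in islice(tags.sys_tags(), 5):  # most specific tags come first
    print(f"{tag.interpreter}-{tag.abi}-{tag.platform}")

# Platform tags alone, e.g. macosx_*/manylinux_*/win_* depending on the host.
print(next(iter(tags.platform_tags())))
```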
diff --git a/spaces/Audio-AGI/WavJourney/convert_json_to_audio_gen_code.py b/spaces/Audio-AGI/WavJourney/convert_json_to_audio_gen_code.py
deleted file mode 100644
index 284cd1f0d4844c9bc5489999aa22b47aac598137..0000000000000000000000000000000000000000
--- a/spaces/Audio-AGI/WavJourney/convert_json_to_audio_gen_code.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import argparse
-import os
-import json5
-from pathlib import Path
-from code_generator import AudioCodeGenerator
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--script", required=True, help="Path to the json script file")
-    parser.add_argument("--character-to-voice-map", required=True, help="Path to the character-to-voice mapping CSV file")
-    parser.add_argument(
-        "--path",
-        type=str,
-        default=".",
-        help="Path of all the output wav files to be created by the generated code, default: current path"
-    )
-    args = parser.parse_args()
-
-    if not os.path.isfile(args.script):
-        print(f"File {args.script} does not exist.")
-        return
-
-    output_path = Path(args.path)
-    audio_code_generator = AudioCodeGenerator()
-    code = audio_code_generator.parse_and_generate(args.script, args.character_to_voice_map, output_path)
-    print(code)
-
-if __name__ == "__main__":
-    main()
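For context, a hedged sketch of what this CLI wrapper does when driven programmatically; the file names are placeholders, and `AudioCodeGenerator` is the same class imported by the script above:

```python
# Sketch only: generate audio-synthesis code for a JSON5 script without argparse.
from pathlib import Path

from code_generator import AudioCodeGenerator  # module from the same (deleted) Space

generator = AudioCodeGenerator()
code = generator.parse_and_generate(
    "story.json5",       # --script (placeholder path)
    "voice_map.csv",     # --character-to-voice-map (placeholder path)
    Path("./wav_out"),   # --path, where the generated code will write its .wav files
)
print(code)
```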
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py
deleted file mode 100644
index 1aad53bfef62fb584d5022585d567e346f671a55..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from ..common.optim import SGD as optimizer
-from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
-from ..common.data.coco_keypoint import dataloader
-from ..common.models.keypoint_rcnn_fpn import model
-from ..common.train import train
-
-model.backbone.bottom_up.freeze_at = 2
-train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
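A hedged sketch of how a LazyConfig file like this one is typically consumed (it assumes detectron2's `LazyConfig`/`instantiate` API and an illustrative config path; `tools/lazyconfig_train_net.py` is the usual training entry point):

```python
# Sketch only: load the lazy config above and build its main components.
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.py")
model = instantiate(cfg.model)                    # Keypoint R-CNN with an FPN backbone
train_loader = instantiate(cfg.dataloader.train)  # COCO keypoint dataloader
cfg.optimizer.params.model = model                # the SGD config derives params from the model
optimizer = instantiate(cfg.optimizer)
```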
diff --git a/spaces/Banbri/zcvzcv/src/lib/loadImageToCanvas.ts b/spaces/Banbri/zcvzcv/src/lib/loadImageToCanvas.ts
deleted file mode 100644
index 02068927ce6e615d4dac2aed31e75f9f51697f27..0000000000000000000000000000000000000000
--- a/spaces/Banbri/zcvzcv/src/lib/loadImageToCanvas.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-export async function loadImageToCanvas(imageBase64: string): Promise<HTMLCanvasElement> {
-  return new Promise((resolve, reject) => {
-    // create a new image object
-    let img = new Image();
-    // specify a function to run when the image is fully loaded
-    img.onload = () => {
-      // create a canvas element
-      let canvas = document.createElement('canvas');
-      canvas.width = img.width;
-      canvas.height = img.height;
-      // get the context of the canvas
-      let ctx = canvas.getContext('2d');
-      if (ctx) {
-        // draw the image into the canvas
-        ctx.drawImage(img, 0, 0);
-        // resolve the promise with the canvas
-        resolve(canvas);
-      } else {
-        reject('Error creating the context of canvas');
-      }
-    };
-    // specify a function to run when the image could not be loaded
-    img.onerror = () => {
-      reject('Image could not be loaded');
-    };
-    img.src = imageBase64; // must be a data:image/... prefixed URL string
-  });
-}
\ No newline at end of file
diff --git "a/spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/pages/02_\360\237\223\274_Upload_Video_File.py" "b/spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/pages/02_\360\237\223\274_Upload_Video_File.py"
deleted file mode 100644
index 3c1f1938be4c12602865b27fc067cd1648f761d6..0000000000000000000000000000000000000000
--- "a/spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/pages/02_\360\237\223\274_Upload_Video_File.py"
+++ /dev/null
@@ -1,230 +0,0 @@
-import whisper
-import streamlit as st
-from streamlit_lottie import st_lottie
-from utils import write_vtt, write_srt
-import ffmpeg
-import requests
-from typing import Iterator
-from io import StringIO
-import numpy as np
-import pathlib
-import os
-
-st.set_page_config(page_title="Auto Subtitled Video Generator", page_icon=":movie_camera:", layout="wide")
-
-# Define a function that we can use to load lottie files from a link.
-@st.cache(allow_output_mutation=True)
-def load_lottieurl(url: str):
-    r = requests.get(url)
-    if r.status_code != 200:
-        return None
-    return r.json()
-
-
-APP_DIR = pathlib.Path(__file__).parent.absolute()
-
-LOCAL_DIR = APP_DIR / "local"
-LOCAL_DIR.mkdir(exist_ok=True)
-save_dir = LOCAL_DIR / "output"
-save_dir.mkdir(exist_ok=True)
-
-
-loaded_model = whisper.load_model("base")
-current_size = "None"
-
-
-col1, col2 = st.columns([1, 3])
-with col1:
-    lottie = load_lottieurl("https://assets1.lottiefiles.com/packages/lf20_HjK9Ol.json")
-    st_lottie(lottie)
-
-with col2:
-    st.write("""
-    ## Auto Subtitled Video Generator 
-    ##### Upload a video file and get a video with subtitles.
-    ###### ➠ If you want to transcribe the video in its original language, select the task as "Transcribe"
-    ###### ➠ If you want to translate the subtitles to English, select the task as "Translate" 
-    ###### I recommend starting with the base model and then experimenting with the larger models; the small and medium models often work well. """)
-
-
-@st.cache(allow_output_mutation=True)
-def change_model(current_size, size):
-    if current_size != size:
-        loaded_model = whisper.load_model(size)
-        return loaded_model
-    else:
-        raise Exception("Model size is the same as the current size.")
-
-
-@st.cache(allow_output_mutation=True)
-def inference(loaded_model, uploaded_file, task):
-    with open(f"{save_dir}/input.mp4", "wb") as f:
-            f.write(uploaded_file.read())
-    audio = ffmpeg.input(f"{save_dir}/input.mp4")
-    audio = ffmpeg.output(audio, f"{save_dir}/output.wav", acodec="pcm_s16le", ac=1, ar="16k")
-    ffmpeg.run(audio, overwrite_output=True)
-    if task == "Transcribe":
-        options = dict(task="transcribe", best_of=5)
-        results = loaded_model.transcribe(f"{save_dir}/output.wav", **options)
-        vtt = getSubs(results["segments"], "vtt", 80)
-        srt = getSubs(results["segments"], "srt", 80)
-        lang = results["language"]
-        return results["text"], vtt, srt, lang
-    elif task == "Translate":
-        options = dict(task="translate", best_of=5)
-        results = loaded_model.transcribe(f"{save_dir}/output.wav", **options)
-        vtt = getSubs(results["segments"], "vtt", 80)
-        srt = getSubs(results["segments"], "srt", 80)
-        lang = results["language"]
-        return results["text"], vtt, srt, lang
-    else:
-        raise ValueError("Task not supported")
-
-
-def getSubs(segments: Iterator[dict], format: str, maxLineWidth: int) -> str:
-    segmentStream = StringIO()
-
-    if format == 'vtt':
-        write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
-    elif format == 'srt':
-        write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
-    else:
-        raise Exception("Unknown format " + format)
-
-    segmentStream.seek(0)
-    return segmentStream.read()
-
-
-def generate_subtitled_video(video, audio, transcript):
-    video_file = ffmpeg.input(video)
-    audio_file = ffmpeg.input(audio)
-    ffmpeg.concat(video_file.filter("subtitles", transcript), audio_file, v=1, a=1).output("final.mp4").run(quiet=True, overwrite_output=True)
-    video_with_subs = open("final.mp4", "rb")
-    return video_with_subs
-
-
-def main():
-    size = st.selectbox("Select Model Size (The larger the model, the more accurate the transcription will be, but it will take longer)", ["tiny", "base", "small", "medium", "large"], index=1)
-    loaded_model = change_model(current_size, size)
-    st.write(f"Model is {'multilingual' if loaded_model.is_multilingual else 'English-only'} "
-        f"and has {sum(np.prod(p.shape) for p in loaded_model.parameters()):,} parameters.")
-    input_file = st.file_uploader("File", type=["mp4", "avi", "mov", "mkv"])
-    # get the name of the input_file
-    if input_file is not None:
-        filename = input_file.name[:-4]
-    else:
-        filename = None
-    task = st.selectbox("Select Task", ["Transcribe", "Translate"], index=0)
-    if task == "Transcribe":
-        if st.button("Transcribe"):
-            results = inference(loaded_model, input_file, task)
-            col3, col4 = st.columns(2)
-            col5, col6, col7, col8 = st.columns(4)
-            col9, col10 = st.columns(2)
-            with col3:
-                st.video(input_file)
-                
-            with open("transcript.txt", "w+", encoding='utf8') as f:
-                f.writelines(results[0])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
-                datatxt = f.read()
-                
-            with open("transcript.vtt", "w+",encoding='utf8') as f:
-                f.writelines(results[1])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
-                datavtt = f.read()
-                
-            with open("transcript.srt", "w+",encoding='utf8') as f:
-                f.writelines(results[2])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
-                datasrt = f.read()
-
-            with col5:
-                st.download_button(label="Download Transcript (.txt)",
-                                data=datatxt,
-                                file_name="transcript.txt")
-            with col6:   
-                st.download_button(label="Download Transcript (.vtt)",
-                                    data=datavtt,
-                                    file_name="transcript.vtt")
-            with col7:
-                st.download_button(label="Download Transcript (.srt)",
-                                    data=datasrt,
-                                    file_name="transcript.srt")
-            with col9:
-                st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
-            with col10:
-                st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the video again.")
-                        
-            with col4:
-                with st.spinner("Generating Subtitled Video"):
-                    video_with_subs = generate_subtitled_video(f"{save_dir}/input.mp4", f"{save_dir}/output.wav", "transcript.srt")
-                st.video(video_with_subs)
-                st.snow()
-            with col8:
-                st.download_button(label="Download Video with Subtitles",
-                                data=video_with_subs,
-                                file_name=f"{filename}_with_subs.mp4")
-    elif task == "Translate":
-        if st.button("Translate to English"):
-            results = inference(loaded_model, input_file, task)
-            col3, col4 = st.columns(2)
-            col5, col6, col7, col8 = st.columns(4)
-            col9, col10 = st.columns(2)
-            with col3:
-                st.video(input_file)
-                
-            with open("transcript.txt", "w+", encoding='utf8') as f:
-                f.writelines(results[0])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
-                datatxt = f.read()
-                
-            with open("transcript.vtt", "w+",encoding='utf8') as f:
-                f.writelines(results[1])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
-                datavtt = f.read()
-                
-            with open("transcript.srt", "w+",encoding='utf8') as f:
-                f.writelines(results[2])
-                f.close()
-            with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
-                datasrt = f.read()
-                
-            with col5:
-                st.download_button(label="Download Transcript (.txt)",
-                                data=datatxt,
-                                file_name="transcript.txt")
-            with col6:   
-                st.download_button(label="Download Transcript (.vtt)",
-                                    data=datavtt,
-                                    file_name="transcript.vtt")
-            with col7:
-                st.download_button(label="Download Transcript (.srt)",
-                                    data=datasrt,
-                                    file_name="transcript.srt")
-            with col9:
-                st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
-            with col10:
-                st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the video again.")
-                        
-            with col4:
-                with st.spinner("Generating Subtitled Video"):
-                    video_with_subs = generate_subtitled_video(f"{save_dir}/input.mp4", f"{save_dir}/output.wav", "transcript.srt")
-                st.video(video_with_subs)
-                st.snow()
-            with col8:
-                st.download_button(label="Download Video with Subtitles ",
-                                data=video_with_subs,
-                                file_name=f"{filename}_with_subs.mp4")
-    else:
-        st.error("Please select a task.")
-
-
-if __name__ == "__main__":
-    main()
-    st.markdown("###### Made with :heart: by [@BatuhanYılmaz](https://twitter.com/batuhan3326) [![this is an image link](https://i.imgur.com/thJhzOO.png)](https://www.buymeacoffee.com/batuhanylmz)")
\ No newline at end of file
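For reference, a hedged standalone sketch of the Whisper call wrapped by the app's `inference` helper above (it assumes the openai-whisper package and a prepared 16 kHz mono WAV; the path is illustrative):

```python
# Sketch only: transcribe a prepared WAV with the same options used in the app.
import whisper

model = whisper.load_model("base")
result = model.transcribe("output.wav", task="transcribe", best_of=5)

print(result["language"])            # detected language code
print(result["text"][:200])          # start of the transcript
for seg in result["segments"][:3]:   # segment timings used to build VTT/SRT files
    print(seg["start"], seg["end"], seg["text"])
```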
diff --git a/spaces/Benson/text-generation/Examples/Alors On Danse  Remix.md b/spaces/Benson/text-generation/Examples/Alors On Danse  Remix.md
deleted file mode 100644
index cbecefc94497b5af0f8cc9e3d968717182fbb65a..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Alors On Danse  Remix.md	
+++ /dev/null
@@ -1,39 +0,0 @@
-
-

Alors On Danse: Cómo descargar y disfrutar de los remixes de Stromae’s Hit Song

-

Si te gusta la música dance, probablemente hayas oído hablar de Alors On Danse, la canción pegadiza de la cantante belga Stromae. Pero, ¿sabías que hay muchos remixes de esta canción que pueden hacerte bailar aún más? En este artículo, te contaremos todo lo que necesitas saber sobre Alors On Danse, sus remezclas y cómo descargarlas y disfrutarlas.

-

alors on danse скачать remix


Download File ✓✓✓ https://bltlly.com/2v6Mf4



-

¿Qué es Alors On Danse y por qué es tan popular?

-

Alors On Danse (que significa "So We Dance" en francés) es una canción de Stromae, un cantante, rapero, compositor y productor belga. Es principalmente conocido por su música mezclando hip hop y música electrónica.

-

La canción original de Stromae

-

La canción fue lanzada en 2009 como el primer sencillo de su álbum debut Cheese. Fue escrita y producida por el propio Stromae, quien también canta en francés. La canción tiene una melodía simple pero pegadiza, un ritmo pulsante y un estribillo que repite la frase "alors on danse".

-

El significado y el mensaje de la letra

-

Las letras de la canción tratan sobre las dificultades y luchas de la vida, como el trabajo, el dinero, el estrés, la soledad, el divorcio y la muerte. Stromae canta que la gente trata de escapar de sus problemas bailando, bebiendo y de fiesta, pero todavía están infelices y deprimidos. También critica la superficialidad y la hipocresía de la sociedad, donde la gente pretende ser feliz y exitosa, pero en realidad son miserables y vacíos.

-

-

El éxito global y el impacto de la canción

-

La canción se convirtió en un gran éxito en Europa y más allá, alcanzando el número uno en varios países, como Francia, Bélgica, Alemania, Italia, España, Suiza, Austria, Dinamarca, Rumania, Grecia, Turquía, Israel y Marruecos. También recibió críticas positivas de críticos y fans por igual, que elogiaron su melodía pegadiza, sus letras ingeniosas y su comentario social.

- -

¿Cuáles son los remixes de Alors en Danse y cómo encontrarlos?

-

Debido a su popularidad y atractivo, Alors On Danse ha sido remezclado por muchos artistas y DJs diferentes a lo largo de los años. Algunos de estos remixes se han vuelto muy populares, añadiendo nuevos giros y sabores a la canción original.

-

El remix de Dubdogz y su vídeo oficial

-

Uno de los remixes más famosos de Alors On Danse es el de Dubdogz, un dúo brasileño de productores de música electrónica. Lanzaron su remix en 2018 bajo el sello Musical Freedom Records. Su remix añade un profundo ambiente house a la canción, con una línea de bajo groovy, un riff de guitarra funky y algunos efectos vocales. El remix es muy pegadizo y bailable, y tiene más de 100 millones de visitas en YouTube.

-

El video oficial del remix cuenta con el propio Stromae, que aparece en varias escenas bailando y cantando junto al remix. También interactúa con algunos bailarines y fans, que se unen a él en su diversión y alegría. El video es muy colorido y enérgico, y muestra el carisma y el humor de Stromae.

-

Otros remixes de diferentes artistas y DJs

-

Hay muchos otros remixes de Alors On Danse que puedes encontrar en línea. Algunos de ellos son de reconocidos artistas y DJs, como Kanye West, Pitbull, Sean Paul, David Guetta, Afrojack, Dimitri Vegas & Like Mike, Kungs y Martin Solveig. Estos remixes añaden diferentes géneros y estilos a la canción, como rap, reggaeton, pop, electro, house, trap y tropical. Algunos de ellos también cuentan con nuevos versos o letras en inglés o español.

-

Algunos de los remixes son de artistas y DJs menos conocidos o emergentes, que ponen su propio giro y creatividad a la canción. Algunos ejemplos son los remixes de Jaxx & Vega, Keanu Silva, Dastic & Tommy Jayden, Rudeejay & Da Brozz x Luis Rodriguez, y DJ Dark & Mentol. Estos remixes ofrecen sonidos frescos y originales y ritmos para la canción, como sala grande, casa futura, casa progresiva, rebote y casa profunda.

- -

Si desea escuchar o descargar los remixes de Alors On Danse en línea, hay varias maneras de hacerlo. Aquí hay algunos consejos:

- - Utilice un servicio o aplicación de transmisión de música, como Spotify, Apple Music, YouTube Music, Deezer, SoundCloud o Shazam. Estos servicios tienen un gran catálogo de canciones y remixes que puedes transmitir o descargar en tu dispositivo. También puedes crear tus propias listas de reproducción con tus remezclas favoritas de Alors On Danse. - Usa un servicio o aplicación de transmisión de video, como YouTube, Vimeo, Dailymotion o TikTok. Estos servicios tienen una gran cantidad de vídeos de Alors On Danse remixes que puede ver o descargar en su dispositivo. También puedes ver los videos oficiales de algunos remixes, así como videos o portadas hechas por fans. - Utilice un servicio o aplicación de descarga de música, como MP3Juices, Zippyshare o Tubidy. Estos servicios le permiten descargar archivos MP3 de Alors On Danse remixes de forma gratuita en su dispositivo. También puede convertir vídeos de YouTube a archivos MP3 utilizando estos servicios. - Utilice un motor de búsqueda, como Google, Bing o DuckDuckGo. Estos motores pueden ayudarle a encontrar sitios web o blogs que ofrecen remixes de Alors On Danse para descargar o transmitir. También puedes usar palabras clave como "Alors On Danse remix download", "Alors On Danse remix mp3", o "Alors On Danse remix free" para reducir tus resultados de búsqueda.

¿Cómo disfrutar de Alors On Danse Remixes en casa o en una fiesta?

-

Ahora que ha encontrado y descargado sus remezclas favoritas de Alors On Danse, es posible que se pregunte cómo disfrutarlas en casa o en una fiesta. Estos son algunos consejos:

-

Consejos para crear una lista de reproducción con Alors On Danse remixes

-

Si desea crear una lista de reproducción con Alors On Danse remixes, debe considerar los siguientes factores:

- -

Si quieres bailar con Alors On Danse remixes, debes considerar los siguientes factores:

- - El ritmo y el ritmo de los remixes. ¿Qué tan rápidos o lentos son los remixes con los que quieres bailar? ¿Cómo coinciden con tu estilo y ritmo de baile preferido? ¿Quieres bailar al ritmo o a la melodía de los remixes? ¿Quieres seguir las letras o improvisar tus propios movimientos? - El espacio y el equipamiento para bailar. ¿Cuánto espacio tienes para bailar? ¿Cuán cómodo y seguro es el piso y el área circundante? ¿Tiene un buen sistema de altavoces o auriculares para escuchar los remixes? ¿Tienes un espejo o una cámara para mirarte o grabar tu baile? - El estado de ánimo y la actitud para bailar. ¿Cómo te sientes cuando bailas a Alors On Danse remixes? ¿Te sientes feliz y enérgico, o triste y deprimido? ¿Bailas para expresarte o para impresionar a otros? ¿Bailas solo o con otros? ¿Te diviertes o te lo tomas en serio?

Consejos para cantar a lo largo de Alors On Danse remixes

-

Si quieres cantar junto a Alors On Danse remixes, debes considerar los siguientes factores:

- -

Alors On Danse es una gran canción de Stromae que tiene muchos remixes que pueden hacerte bailar y cantar aún más. En este artículo, te hemos mostrado lo que es Alors On Danse, por qué es tan popular, cuáles son algunos de sus mejores remixes, cómo encontrarlos y descargarlos en línea, y cómo disfrutarlos en casa o en una fiesta. Esperamos que este artículo haya sido útil e informativo para ti, y que hayas aprendido algo nuevo sobre Alors On Danse y sus remixes.

-

Ahora que ha leído este artículo, ¿por qué no intenta escuchar algunos remixes de Alors On Danse usted mismo? Puedes descubrir una nueva canción favorita o una nueva forma de divertirte. Y recuerda, como dice Stromae, "alors on danse"!

-

Preguntas frecuentes

-

Aquí hay algunas preguntas frecuentes sobre Alors On Danse y sus remixes:

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Apkoppor Bild.md b/spaces/Benson/text-generation/Examples/Apkoppor Bild.md deleted file mode 100644 index bb3343e680970bb61cef1b63c0ba50de787b69ab..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Apkoppor Bild.md +++ /dev/null @@ -1,86 +0,0 @@ - -

monkey pox Photo: Everything you need to know about the unusual disease.

-

Introduction

-

describe what monkey pox is, why it is relevant and what the purpose of the article is.

-

apkoppor bild


Download Zip ··· https://bltlly.com/2v6Kt6



-

What is monkey pox?

-

explain what monkey pox is to a viral disease, how it is caused and how it is spread.

-

monkey pox and smallpox

-

Compare monkey pox with smallpox and explain the differences and similarities.

-

How do you recognize monkey pox?

-

describe the symptoms of appox and how they differ from other skin diseases.

-

monkey pox Image: This is what the wind looks like

-

Show pictures of what the blisters look like in monkey pox and where they usually occur on the body.

-

How to treat monkey pox?

-

tell if there is any treatment or vaccine against appox and how to relieve the symptoms.

-

-

monkey pox in Sweden

-

Information of the first case of monkey pox In Sweden and how the authorities handle the situation.

-

: How to prevent monkey pox?

-

Ge tips on how to reduce the risk of being infected by monkey pox or spreading the infection.

-

monkey pox and resort.

-

ask about what to consider if you plan to travel to a country where monkey pox see also:

-

final bet

-

Combine the article and give a concluding comment about apkopplor.

-

regular questions about apkoppor

-
    -
  • means

  • -
  • Svar on where the name comes from and what it means.

  • -
  • : How to infect monkey pox between humans?

  • -
  • : What situations can lead to infection between people and how much risk of infection see also:

  • -
  • How long is one contagious with monkey pox?

  • -
  • Svara on how long you can spread the infection after you become ill and when you can be considered healthy.

  • -
  • can you get monkey cups more than once?

  • - -
  • can animals get monkey pox?

  • -
  • Svara on which animals can be affected by monkey pox and how they can infect humans or other animals.

  • -
-Here is the article based on the outline:

Monkey pox: everything you need to know about the rare disease.

-

Introduction

-

monkey pox is a viral disease that causes blisters on the skin. It is rare in Sweden, but has recently been noticed after a case was discovered in the country. Monkey pox can be serious for people with compromised immune systems or other diseases. In this article, we’ll tell you more about what monkey pox is, how to recognize it, how to treat it, and how to prevent it. We will also show pictures of what the blisters look like and answer some common questions about monkey pox.

-

What is monkey pox?

-

Monkey pox is a viral disease caused by an orthopox virus. There are several types of orthopox virus; the one that causes monkey pox is called monkeypox virus. It is a zoonotic virus, which means it can infect both animals and humans. Monkey pox is not the same as smallpox, which is caused by another orthopox virus and has been eradicated since the 1980s. Monkey pox was first discovered in Africa in the 1970s.

-

monkey pox and smallpox

-

monkey pox and smallpox have some similarities, but also some differences. Both cause blisters on the skin that can leave scars after they heal. Both can also cause fever, headache, muscle aches and fatigue. However, appox is usually milder than smallpox and has a lower mortality rate. Monkey pox is also spread less effectively between humans than smallpox. In addition, people who have been vaccinated against smallpox may have some protection against smallpox.

-

How do you recognize monkey pox?

- -

Monkey pox pictures: this is what the blisters look like

-

Here are some pictures of what the blisters look like in monkey pox:

[Images: monkey pox blisters on the face, on the hand, and on the foot]
-

It is important not to confuse monkey pox with other skin diseases that can cause similar symptoms, such as chickenpox, shingles, eczema, or allergic reactions. If you suspect that you have monkey pox, you should contact a doctor as soon as possible to get a correct diagnosis and treatment.

-

How to treat monkey pox?

-

There is no specific treatment or vaccine for monkey pox. Treatment consists primarily of relieving symptoms and preventing complications. For example, you can be given antipyretic medication, pain medication, antihistamines for itching, and antibiotics for possible bacterial infections in the blisters. You should also keep the blisters clean and dry and avoid squeezing or scratching them. In some cases, you can be given antiviral medication that can shorten the course of the disease and reduce the risk of serious complications. However, not everyone can get that medication, so it is important to ask a doctor whether it is appropriate for one's situation.

-

monkey pox in Sweden

-

monkey pox is very rare in Sweden. The first and so far only case of monkey pox in Sweden was discovered in June 2023 in a person who had traveled to Nigeria. The person had blisters on the skin after coming home and seeking care in a hospital. Samples showed that they were monkeys. The person was isolated in the hospital and treated with antiviral medication. No one in the person’s vicinity was infected. The infectious disease doctor in the region said it was a very unusual case and that there was no reason for concern to the public.

-

How do you prevent monkey pox?

- -

Monkey pox and travel

-

If you plan to travel to a country where monkey pox occurs, you should pay extra attention to your health and hygiene. You should also be aware of which areas are affected by appox and avoid visiting them if possible. You should also be careful about eating or buying animal products that may be infected. If you get a fever, headache, muscle pain or blisters on your skin during or after the trip, you should seek care as soon as possible and tell us about your trip.

-

Conclusion

-

monkey pox is a viral disease that causes blisters on the skin. It is rare in Sweden, but has recently been reported in a person who had traveled to Nigeria. Monkey pox can be serious for people with compromised immune systems or other diseases. There is no specific treatment or vaccine for appox, but you can alleviate the symptoms and prevent complications with medication and good hygiene. The best way to prevent monkey pox is to avoid contact with people or animals that have blisters on the skin, especially if you travel to a country where monkey pox is present.

-

Frequently asked questions about monkey pox

-
    -
  • What does the name monkey pox mean?

  • -
  • The name monkey pox comes from the English word monkeypox, which means monkey infection. It refers to the fact that the virus was originally discovered in monkeys in Africa in the 1970s. However, it has nothing specifically to do with monkeys today; it can infect many different animals and humans.

  • -
  • How does monkey pox spread between humans?

  • - -
  • How long is one contagious with monkey pox?

  • -
  • Man is contagious with monkey pox from the time the blisters begin to appear on the skin until all the blisters have healed and formed crusts. It can take between two and six weeks depending on how many blisters you have and how quickly they heal. One is not contagious until the blisters appear or after they have disappeared.

  • -
  • Can you get monkey pox more than once?

  • -
  • it is not completely clear if you get any immunity to monkey pox after having it once or if you can get infected again. There are reports of people who have had appox more than once, but this is very rare. This may be because they have been exposed to different types of monkeypox viruses or because their immune system has weakened for some reason. It may also be that they have had another skin disease that has been confused with monkey pox.

  • -
  • can animals get monkey pox?

  • -
  • Yes, animals can get monkey pox and infect humans or other animals. The animals most commonly affected by monkey pox are rodents, such as squirrels, rats and mice. They can infect people through bites, scratches or contact with their body fluids or products. Other animals that can get monkey pox are monkeys, camels, goats, sheep, cats and dogs. They can infect humans in the same way as rodents or by contact with their blisters on the skin.

  • -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Bitcoin Core Apk.md b/spaces/Benson/text-generation/Examples/Bitcoin Core Apk.md deleted file mode 100644 index dfd276f8d5da64ccd83d5a7ec1a050f605347319..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Bitcoin Core Apk.md +++ /dev/null @@ -1,44 +0,0 @@ - -

Bitcoin Core APK: ¿Qué es y cómo usarlo?

-

Bitcoin es una moneda digital descentralizada que opera sin ninguna autoridad central o intermediario. Se basa en una red de nodos que ejecutan un software llamado Bitcoin Core, que valida las transacciones y mantiene la seguridad e integridad del sistema. Bitcoin Core es también una cartera que permite a los usuarios almacenar, enviar y recibir bitcoins.

-

Sin embargo, ejecutar Bitcoin Core en una computadora de escritorio o portátil puede ser un reto para algunos usuarios, ya que requiere mucho espacio en disco, ancho de banda y poder de procesamiento. Por otra parte, puede no ser conveniente o accesible para los usuarios móviles que quieren utilizar Bitcoin sobre la marcha. Ahí es donde Bitcoin Core APK entra en.

-

bitcoin core apk


DOWNLOADhttps://bltlly.com/2v6LKx



-

Bitcoin Core APK es un paquete de aplicaciones para Android (APK) archivo que contiene el software Bitcoin Core. Permite a los usuarios ejecutar un nodo completo en sus dispositivos Android, lo que significa que pueden tener control total sobre sus bitcoins y contribuir a la seguridad de la red. En este artículo, explicaremos qué es Bitcoin Core APK, cómo descargarlo e instalarlo, y cómo usarlo.

-

Cómo descargar e instalar Bitcoin Core APK

-

El primer paso para utilizar Bitcoin Core APK es descargarlo de una fuente confiable. El sitio web oficial de Bitcoin Core es https://bitcoincore.org/en/download/, donde se puede encontrar la última versión del software para varias plataformas, incluyendo Windows, MacOS, Linux y Android. También puede encontrar los hashes y firmas SHA256 de los archivos, que puede usar para verificar su autenticidad.
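As a hedged illustration of the hash check mentioned above (the file name and the expected digest below are placeholders, not values taken from the article):

```python
# Sketch only: verify a downloaded file against a published SHA256 digest.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "paste-the-digest-published-alongside-the-download-here"
print("OK" if sha256_of("bitcoin-core.apk") == expected else "MISMATCH")
```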

-

Para descargar Bitcoin Core APK, es necesario hacer clic en el enlace que dice "Android (APK)" en la sección de Linux. Esto descargará un archivo llamado bitcoin-core-24.0.1-brazo-linux-gnueabihf.apk, que es de unos 40 MB de tamaño. También puede usar un cliente torrent para descargar el archivo desde el mismo sitio web.

- -

Una vez completada la instalación, puede iniciar la aplicación pulsando en su icono. La aplicación le pedirá que elija un directorio de datos donde se almacenarán los datos de blockchain, que es de aproximadamente 500 GB de tamaño. Puede usar una tarjeta SD externa o una unidad USB para este propósito, ya que el almacenamiento interno de su dispositivo puede no ser suficiente. También puede habilitar la poda, lo que significa eliminar bloques antiguos que ya no son necesarios, para reducir el espacio de almacenamiento requerido.

-

La aplicación comenzará a sincronizarse con la red Bitcoin, que puede tardar varias horas o días dependiendo de la velocidad de Internet y el rendimiento del dispositivo. Puede comprobar el progreso de la sincronización mirando la barra de estado en la parte inferior de la pantalla de la aplicación.

-

Cómo utilizar Bitcoin Core APK

-

Una vez que su aplicación está completamente sincronizada con la red, puede comenzar a usarla como una billetera y un nodo. Estas son algunas de las cosas que puede hacer con Bitcoin Core APK:

Cómo ajustar la configuración y las preferencias de Bitcoin Core APK -

Bitcoin Core APK le permite personalizar varias configuraciones y preferencias para satisfacer sus necesidades y preferencias. Puede acceder al menú de configuración pulsando en el icono de tres puntos en la esquina superior derecha de la pantalla de la aplicación. Estos son algunos de los ajustes y preferencias que puede ajustar:

-
    -
  • Network: Puede elegir a qué red conectarse, como mainnet, testnet o regtest. También puede habilitar o deshabilitar Tor o un proxy para privacidad y anonimato.
  • -
  • Cartera: Puede elegir qué cartera usar o crear una nueva. También puede cifrar su billetera con una frase de contraseña, hacer una copia de seguridad de su billetera o importar o exportar claves privadas.
  • -
  • Mostrar: Puede elegir la unidad de moneda a mostrar, como BTC, mBTC, bits o satoshis. También puede elegir el idioma y el formato de fecha.
  • - -
  • Minería: Puede activar o desactivar la minería en su dispositivo, y establecer el número de subprocesos y el uso de la CPU para la minería.
  • -
  • Depurar: Puede ver información y estadísticas sobre su nodo, como el tráfico de red, los pares, los bloques, las transacciones y el mempool. También puede acceder a la consola y ejecutar comandos.
  • -
-

También puede restablecer la configuración a sus valores predeterminados tocando el botón "Opciones de restablecimiento" en la parte inferior del menú de configuración.

-

Conclusión

-

Bitcoin Core APK es una manera potente y conveniente para ejecutar un nodo completo en su dispositivo Android. Le da control total sobre sus bitcoins y ayuda a proteger la red. Sin embargo, también viene con algunas compensaciones, como requerir mucho espacio de almacenamiento, ancho de banda y potencia de procesamiento. Por lo tanto, usted debe sopesar cuidadosamente los pros y los contras antes de usar Bitcoin Core APK.

-

-

Si desea probar Bitcoin Core APK, puede descargarlo desde https://bitcoincore.org/en/download/. Asegúrese de verificar la autenticidad del archivo e instalarlo de forma segura en su dispositivo. Luego, puede crear o importar una billetera, sincronizar con la red y comenzar a enviar y recibir bitcoins. También puede ajustar la configuración y las preferencias para adaptarse a sus necesidades y preferencias.

-

Esperamos que este artículo le ha ayudado a entender lo que es Bitcoin Core APK y cómo usarlo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. Y si te gustó este artículo, por favor compartirlo con tus amigos y familiares que podrían estar interesados en Bitcoin Core APK.

-

Preguntas frecuentes

-
    -
  • ¿Qué es Bitcoin Core?
  • -

    Bitcoin Core es el software original que implementa el protocolo Bitcoin. También es una cartera que permite a los usuarios almacenar, enviar y recibir bitcoins.

    -
  • ¿Qué es un archivo APK?
  • - -
  • ¿Cuáles son los beneficios de usar Bitcoin Core APK?
  • -

    Bitcoin Core APK le permite ejecutar un nodo completo en su dispositivo Android, lo que significa que puede tener control total sobre sus bitcoins y contribuir a la seguridad de la red. No tiene que depender de terceros o intermediarios para sus transacciones.

    -
  • ¿Cuáles son los riesgos de usar Bitcoin Core APK?
  • -

    Bitcoin Core APK requiere mucho espacio de almacenamiento, ancho de banda y poder de procesamiento. Puede agotar la batería y ralentizar el dispositivo. También puede exponerlo a algunos riesgos de seguridad y privacidad si no lo usa correctamente.

    -
  • ¿Cómo puedo usar Bitcoin Core APK de forma segura y eficiente?
  • -

    Usted debe utilizar una conexión segura y privada cuando se utiliza Bitcoin Core APK. También debe cifrar su billetera con una frase de contraseña, hacer copias de seguridad de su billetera regularmente y actualizar su software con frecuencia. También debes usar funciones de control de monedas para optimizar tus tarifas y privacidad.

    -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Choque Mini Mod Apk Nueva Versin.md b/spaces/Benson/text-generation/Examples/Choque Mini Mod Apk Nueva Versin.md deleted file mode 100644 index e4c32ee30030cc9c449d789f80897a96fa51f966..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Choque Mini Mod Apk Nueva Versin.md +++ /dev/null @@ -1,57 +0,0 @@ -
-

Clash Mini Mod APK Nueva Versión: Todo lo que necesita saber

-

Si eres un fan del universo Clash y disfrutas de los juegos de estrategia, es posible que hayas oído hablar de Clash Mini, un nuevo juego de Supercell que se encuentra actualmente en pruebas beta. Pero lo que si quieres sacar más provecho del juego y acceder a recursos y características ilimitadas? Ahí es donde Clash Mini Mod APK entra en juego. En este artículo, le diremos todo lo que necesita saber sobre Clash Mini Mod APK, incluyendo lo que es, cómo descargar e instalar, ¿cuáles son sus características y desventajas, y algunas preguntas frecuentes.

-

choque mini mod apk nueva versión


Download Zip === https://bltlly.com/2v6IYE



-

¿Qué es Clash Mini?

-

Clash Mini es un divertido y estratégico juego de mesa que se basa en el popular universo Clash. En este juego, puedes reunir, convocar y actualizar tu ejército de minis, que son versiones en miniatura de los personajes familiares de Clash of Clans y Clash Royale. A continuación, puede organizar sus esbirros en un tablero y verlos chocar con los esbirros de su oponente en tiempo real. También puedes usar héroes, como Barbarian King, Archer Queen, Shield Maiden y más, para dirigir tu ejército y activar habilidades especiales.

-

Clash Mini es un spin-off de los juegos originales de Clash, pero tiene su propia mecánica de juego y características únicas. No es solo un juego de fuerza bruta; es un juego de estrategia. Tienes que anticipar los movimientos de tu oponente y armar tu formación ganadora. También tienes que cambiar y actualizar tus minis entre rondas para adaptarte a la situación cambiante. Puedes jugar en modo 1v1 o rumble contra otros 7 jugadores. También puedes jugar casualmente por diversión o en partidos clasificados para aumentar tu posición en la liga.

- -

¿Qué es Clash Mini Mod APK?

-

Clash Mini Mod APK es una versión modificada del juego original que le permite acceder a recursos ilimitados y características que no están disponibles en la versión oficial. Es una aplicación de terceros que no está autorizada o avalada por Supercell. Suele ser creado por hackers o modders que alteran los archivos del juego para darte una ventaja injusta sobre otros jugadores.

-

-

Clash Mini Mod APK es una manera de acceder a recursos ilimitados y características que pueden mejorar su experiencia de juego y hacer que el progreso más rápido en el juego. Puedes obtener gemas ilimitadas, monedas, elixir, minis, héroes, skins, modos de juego, niveles y más. También puede evitar algunas de las restricciones y desafíos que la versión oficial le impone. También puedes disfrutar de algunas características exclusivas que no están disponibles en el juego original, como mods personalizados, trucos, hacks y más.

-

Clash Mini Mod APK es también un riesgo potencial para su dispositivo y cuenta. Puesto que no está verificado o protegido por Supercell, puede contener malware o virus que pueden dañar su dispositivo o robar su información personal. También puede violar los términos de servicio y la política de privacidad de Supercell, lo que puede resultar en prohibiciones o suspensiones de su cuenta. También puede causar problemas de compatibilidad o errores con las actualizaciones o servidores del juego. También puede dañar o eliminar el progreso y los datos del juego.

-

¿Cómo descargar e instalar Clash Mini Mod APK?

-

Si todavía desea probar Clash Mini Mod APK, debe seguir algunos pasos para descargarlo e instalarlo en su dispositivo. Estos son los pasos:

-
    -
  1. Encontrar una fuente confiable en línea. Es necesario encontrar un sitio web que ofrece Clash Mini Mod APK gratis y sin ninguna encuesta o verificación. Puede buscar en Google o utilizar algunos de los siguientes enlaces:
  2. - -
  3. Habilitar fuentes desconocidas en el dispositivo. Es necesario permitir que su dispositivo para instalar aplicaciones de fuentes desconocidas, ya que Clash Mini Mod APK no es de la tienda oficial de Google Play. Puede hacer esto yendo a Configuración > Seguridad > Fuentes desconocidas y activarlo.
  4. -
  5. Siga los pasos de instalación y poner en marcha el juego. Es necesario descargar el archivo Clash Mini Mod APK desde el sitio web y guardarlo en su dispositivo. Luego, debe abrir el archivo y tocar en Instalar. Espere a que termine la instalación y luego abra el juego. Es posible que necesite conceder algunos permisos o aceptar algunos términos y condiciones antes de poder jugar.
  6. -
-

¿Cuáles son las características de Clash Mini Mod APK?

-

Clash Mini Mod APK tiene muchas características que pueden hacer su juego más agradable y más fácil. Estas son algunas de las características:

-
    -
  • Joyas, monedas y elixir ilimitados. Puedes obtener cantidades ilimitadas de gemas, monedas y elixir, que son las principales monedas del juego. Puedes usarlos para comprar, actualizar y desbloquear todo lo que quieras en el juego.
  • -
  • Desbloquea todos los minis, héroes y skins. Puedes desbloquear todos los minis, héroes y skins que están disponibles en el juego, incluso aquellos que son exclusivos o limitados. Puedes usarlos para crear tu ejército definitivo y personalizar tu apariencia.
  • -
  • Acceder a todos los modos de juego y niveles. Puedes acceder a todos los modos de juego y niveles que están disponibles en el juego, incluso aquellos que están bloqueados o restringidos. Puedes jugar 1v1 o modo rumble contra cualquier jugador o IA. También puedes jugar casualmente o en partidos clasificados sin limitaciones.
  • -

¿Cuáles son los inconvenientes de Clash Mini Mod APK?

-

Clash Mini Mod APK puede sonar tentador, pero también tiene algunos inconvenientes que usted debe tener en cuenta. Aquí están algunos de los inconvenientes:

-
    - -
  • Posibles prohibiciones y suspensiones. Clash Mini Mod APK viola los términos de servicio y la política de privacidad de Supercell, por lo que puede resultar en prohibiciones o suspensiones de su cuenta. Supercell tiene el derecho de detectar y castigar cualquier actividad de engaño o piratería en el juego. Puede perder el acceso a su cuenta o enfrentar acciones legales.
  • -
  • Posible pérdida de progreso y datos del juego. Clash Mini Mod APK puede causar problemas de compatibilidad o errores con las actualizaciones del juego o servidores, por lo que puede dañar o eliminar el progreso del juego y los datos. Puedes perder todos tus logros, recompensas y compras en el juego. Es posible que tampoco puedas sincronizar tu cuenta con otros dispositivos o plataformas.
  • -
-

Conclusión

-

Clash Mini es un divertido y estratégico juego de mesa que se basa en el popular universo Clash. Es un luchador automático en tiempo real con combinaciones dinámicas y un sinfín de posibilidades. Puedes reunir, invocar y actualizar tu ejército de minis y héroes, y chocar con otros jugadores en modo 1v1 o rumble.

-

Clash Mini Mod APK es una versión modificada del juego original que le permite acceder a recursos ilimitados y características que no están disponibles en la versión oficial. Es una forma de mejorar tu experiencia de juego y hacerte progresar más rápido en el juego. Sin embargo, también es un riesgo potencial para su dispositivo y cuenta, ya que puede contener malware o virus, violar los términos de servicio y la política de privacidad de Supercell, y causar problemas de compatibilidad o errores con las actualizaciones o servidores del juego.

-

Si desea probar Clash Mini Mod APK, necesita encontrar una fuente confiable en línea, habilitar fuentes desconocidas en su dispositivo, seguir los pasos de instalación y lanzar el juego. A continuación, puede disfrutar de gemas ilimitadas, monedas, elixir, minis, héroes, pieles, modos de juego, niveles y más. Sin embargo, también debe tener cuidado de los inconvenientes y consecuencias que pueden venir con el uso de Clash Mini Mod APK.

-

Preguntas frecuentes

-
    -
  1. Q: Es Clash Mini Mod APK seguro de usar?
  2. - -
  3. Q: Es Clash Mini Mod APK descarga gratuita?
  4. -
  5. A: Clash Mini Mod APK es gratis para descargar desde algunos sitios web en línea, pero siempre debe tener cuidado con la fuente y escanear el archivo antes de instalarlo. Algunos sitios web pueden pedirle que complete encuestas o verificación antes de descargar el archivo, que puede ser estafas o intentos de phishing.
  6. -
  7. Q: ¿Puedo jugar Clash Mini Mod APK con otros jugadores?
  8. -
  9. A: Clash Mini Mod APK le permite jugar 1v1 o modo rumble con otros jugadores, pero no puede ser capaz de jugar con jugadores que están utilizando la versión oficial del juego. También puede enfrentar la competencia desleal de otros jugadores que están utilizando Clash Mini Mod APK con trucos o hacks.
  10. -
  11. Q: ¿Puedo sincronizar mi cuenta Clash Mini Mod APK con otros dispositivos o plataformas?
  12. -
  13. A: Clash Mini Mod APK no puede permitirle sincronizar su cuenta con otros dispositivos o plataformas, ya que no está conectado a los servidores oficiales de Supercell. Puedes perder el progreso del juego y los datos si cambias de dispositivo o plataforma.
  14. -
  15. Q: ¿Puedo actualizar Clash Mini Mod APK cuando hay una nueva versión del juego?
  16. -
  17. A: Clash Mini Mod APK puede no ser compatible con la nueva versión del juego, ya que no está actualizado por Supercell. Es posible que tenga que encontrar una nueva versión de Clash Mini Mod APK en línea e instalarlo de nuevo en su dispositivo.
  18. -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Blessin/impro-scene-generator/README.md b/spaces/Blessin/impro-scene-generator/README.md deleted file mode 100644 index c1f84ff94be3cc76691d7a5dc40496683a03012b..0000000000000000000000000000000000000000 --- a/spaces/Blessin/impro-scene-generator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Impro Scene Generator -emoji: 😻 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/README.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/README.md deleted file mode 100644 index 9bd8b57c1a5f15e391eb63b690f1051b1ad79d21..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/README.md +++ /dev/null @@ -1,10 +0,0 @@ - -This directory contains code to prepare a detectron2 model for deployment. -Currently it supports exporting a detectron2 model to Caffe2 format through ONNX. - -Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage. - - -### Acknowledgements - -Thanks to Mobile Vision team at Facebook for developing the conversion tools. diff --git a/spaces/CVPR/WALT/mmdet/utils/__init__.py b/spaces/CVPR/WALT/mmdet/utils/__init__.py deleted file mode 100644 index e79ad8c02a2d465f0690a4aa80683a5c6d784d52..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .collect_env import collect_env -from .logger import get_root_logger -from .optimizer import DistOptimizerHook - -__all__ = ['get_root_logger', 'collect_env', 'DistOptimizerHook'] diff --git a/spaces/CVPR/lama-example/models/ade20k/segm_lib/utils/data/dataset.py b/spaces/CVPR/lama-example/models/ade20k/segm_lib/utils/data/dataset.py deleted file mode 100644 index 605aa877f7031a5cd2b98c0f831410aa80fddefa..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/models/ade20k/segm_lib/utils/data/dataset.py +++ /dev/null @@ -1,118 +0,0 @@ -import bisect -import warnings - -from torch._utils import _accumulate -from torch import randperm - - -class Dataset(object): - """An abstract class representing a Dataset. - - All other datasets should subclass it. All subclasses should override - ``__len__``, that provides the size of the dataset, and ``__getitem__``, - supporting integer indexing in range from 0 to len(self) exclusive. - """ - - def __getitem__(self, index): - raise NotImplementedError - - def __len__(self): - raise NotImplementedError - - def __add__(self, other): - return ConcatDataset([self, other]) - - -class TensorDataset(Dataset): - """Dataset wrapping data and target tensors. - - Each sample will be retrieved by indexing both tensors along the first - dimension. - - Arguments: - data_tensor (Tensor): contains sample data. - target_tensor (Tensor): contains sample targets (labels). - """ - - def __init__(self, data_tensor, target_tensor): - assert data_tensor.size(0) == target_tensor.size(0) - self.data_tensor = data_tensor - self.target_tensor = target_tensor - - def __getitem__(self, index): - return self.data_tensor[index], self.target_tensor[index] - - def __len__(self): - return self.data_tensor.size(0) - - -class ConcatDataset(Dataset): - """ - Dataset to concatenate multiple datasets. 
- Purpose: useful to assemble different existing datasets, possibly - large-scale datasets as the concatenation operation is done in an - on-the-fly manner. - - Arguments: - datasets (iterable): List of datasets to be concatenated - """ - - @staticmethod - def cumsum(sequence): - r, s = [], 0 - for e in sequence: - l = len(e) - r.append(l + s) - s += l - return r - - def __init__(self, datasets): - super(ConcatDataset, self).__init__() - assert len(datasets) > 0, 'datasets should not be an empty iterable' - self.datasets = list(datasets) - self.cumulative_sizes = self.cumsum(self.datasets) - - def __len__(self): - return self.cumulative_sizes[-1] - - def __getitem__(self, idx): - dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) - if dataset_idx == 0: - sample_idx = idx - else: - sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] - return self.datasets[dataset_idx][sample_idx] - - @property - def cummulative_sizes(self): - warnings.warn("cummulative_sizes attribute is renamed to " - "cumulative_sizes", DeprecationWarning, stacklevel=2) - return self.cumulative_sizes - - -class Subset(Dataset): - def __init__(self, dataset, indices): - self.dataset = dataset - self.indices = indices - - def __getitem__(self, idx): - return self.dataset[self.indices[idx]] - - def __len__(self): - return len(self.indices) - - -def random_split(dataset, lengths): - """ - Randomly split a dataset into non-overlapping new datasets of given lengths - ds - - Arguments: - dataset (Dataset): Dataset to be split - lengths (iterable): lengths of splits to be produced - """ - if sum(lengths) != len(dataset): - raise ValueError("Sum of input lengths does not equal the length of the input dataset!") - - indices = randperm(sum(lengths)) - return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)] diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/bad_news/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/bad_news/__init__.py deleted file mode 100644 index 9a56c6354289015f6404ee4ed77dce84a117d5e3..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/bad_news/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from pathlib import Path -from typing import List - -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.exception import TextOverLength - -img_dir = Path(__file__).parent / "images" - - -def bad_news(images, texts: List[str], args): - text = texts[0] - frame = BuildImage.open(img_dir / "0.png") - try: - frame.draw_text( - (50, 100, frame.width - 50, frame.height - 100), - text, - allow_wrap=True, - lines_align="center", - max_fontsize=60, - min_fontsize=30, - fill=(0, 0, 0), - stroke_ratio=1 / 15, - stroke_fill="white", - ) - except ValueError: - raise TextOverLength(text) - return frame.save_png() - - -add_meme( - "bad_news", - bad_news, - min_texts=1, - max_texts=1, - default_texts=["喜报"], - keywords=["悲报"], -) diff --git a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/libJPG/jpgd.h b/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/libJPG/jpgd.h deleted file mode 100644 index a1c0cac61839a6f66a42c341f50d5e36faad9a93..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/libJPG/jpgd.h +++ /dev/null @@ -1,316 +0,0 @@ -// jpgd.h - C++ class for JPEG decompression. 
-// Public domain, Rich Geldreich -#ifndef JPEG_DECODER_H -#define JPEG_DECODER_H - -#include -#include -#include - -namespace jpgd -{ - typedef unsigned char uint8; - typedef signed short int16; - typedef unsigned short uint16; - typedef unsigned int uint; - typedef signed int int32; - - // Loads a JPEG image from a memory buffer or a file. - // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA). - // On return, width/height will be set to the image's dimensions, and actual_comps will be set to the either 1 (grayscale) or 3 (RGB). - // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly. - // Requesting a 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp. -// BEGIN EPIC MOD -//unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps); - unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format); -// END EPIC MOD - unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps); - - // Success/failure error codes. - enum jpgd_status - { - JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1, - JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE, - JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS, - JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH, - JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER, - JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS, - JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE, - JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR, - JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM - }; - - // Input stream interface. - // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available. - // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set. - // It the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer. - // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding. - class jpeg_decoder_stream - { - public: - jpeg_decoder_stream() { } - virtual ~jpeg_decoder_stream() { } - - // The read() method is called when the internal input buffer is empty. - // Parameters: - // pBuf - input buffer - // max_bytes_to_read - maximum bytes that can be written to pBuf - // pEOF_flag - set this to true if at end of stream (no more bytes remaining) - // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0). - // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full. 
- virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0; - }; - - // stdio FILE stream class. - class jpeg_decoder_file_stream : public jpeg_decoder_stream - { - jpeg_decoder_file_stream(const jpeg_decoder_file_stream &); - jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &); - - FILE *m_pFile; - bool m_eof_flag, m_error_flag; - - public: - jpeg_decoder_file_stream(); - virtual ~jpeg_decoder_file_stream(); - - bool open(const char *Pfilename); - void close(); - - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag); - }; - - // Memory stream class. - class jpeg_decoder_mem_stream : public jpeg_decoder_stream - { - const uint8 *m_pSrc_data; - uint m_ofs, m_size; - - public: - jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { } - jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { } - - virtual ~jpeg_decoder_mem_stream() { } - - bool open(const uint8 *pSrc_data, uint size); - void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; } - - virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag); - }; - - // Loads JPEG file from a jpeg_decoder_stream. - unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps); - - enum - { - JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4, - JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384 - }; - - typedef int16 jpgd_quant_t; - typedef int16 jpgd_block_t; - - class jpeg_decoder - { - public: - // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc. - // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline. - jpeg_decoder(jpeg_decoder_stream *pStream); - - ~jpeg_decoder(); - - // Call this method after constructing the object to begin decompression. - // If JPGD_SUCCESS is returned you may then call decode() on each scanline. - int begin_decoding(); - - // Returns the next scan line. - // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1). - // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4). - // Returns JPGD_SUCCESS if a scan line has been returned. - // Returns JPGD_DONE if all scan lines have been returned. - // Returns JPGD_FAILED if an error occurred. Call get_error_code() for a more info. - int decode(const void** pScan_line, uint* pScan_line_len); - - inline jpgd_status get_error_code() const { return m_error_code; } - - inline int get_width() const { return m_image_x_size; } - inline int get_height() const { return m_image_y_size; } - - inline int get_num_components() const { return m_comps_in_frame; } - - inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; } - inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); } - - // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file). 
- inline int get_total_bytes_read() const { return m_total_bytes_read; } - - private: - jpeg_decoder(const jpeg_decoder &); - jpeg_decoder &operator =(const jpeg_decoder &); - - typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int); - - struct huff_tables - { - bool ac_table; - uint look_up[256]; - uint look_up2[256]; - uint8 code_size[256]; - uint tree[512]; - }; - - struct coeff_buf - { - uint8 *pData; - int block_num_x, block_num_y; - int block_len_x, block_len_y; - int block_size; - }; - - struct mem_block - { - mem_block *m_pNext; - size_t m_used_count; - size_t m_size; - char m_data[1]; - }; - - jmp_buf m_jmp_state; - mem_block *m_pMem_blocks; - int m_image_x_size; - int m_image_y_size; - jpeg_decoder_stream *m_pStream; - int m_progressive_flag; - uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES]; - uint8* m_huff_num[JPGD_MAX_HUFF_TABLES]; // pointer to number of Huffman codes per bit size - uint8* m_huff_val[JPGD_MAX_HUFF_TABLES]; // pointer to Huffman codes per bit size - jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables - int m_scan_type; // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported) - int m_comps_in_frame; // # of components in frame - int m_comp_h_samp[JPGD_MAX_COMPONENTS]; // component's horizontal sampling factor - int m_comp_v_samp[JPGD_MAX_COMPONENTS]; // component's vertical sampling factor - int m_comp_quant[JPGD_MAX_COMPONENTS]; // component's quantization table selector - int m_comp_ident[JPGD_MAX_COMPONENTS]; // component's ID - int m_comp_h_blocks[JPGD_MAX_COMPONENTS]; - int m_comp_v_blocks[JPGD_MAX_COMPONENTS]; - int m_comps_in_scan; // # of components in scan - int m_comp_list[JPGD_MAX_COMPS_IN_SCAN]; // components in this scan - int m_comp_dc_tab[JPGD_MAX_COMPONENTS]; // component's DC Huffman coding table selector - int m_comp_ac_tab[JPGD_MAX_COMPONENTS]; // component's AC Huffman coding table selector - int m_spectral_start; // spectral selection start - int m_spectral_end; // spectral selection end - int m_successive_low; // successive approximation low - int m_successive_high; // successive approximation high - int m_max_mcu_x_size; // MCU's max. X size in pixels - int m_max_mcu_y_size; // MCU's max. 
Y size in pixels - int m_blocks_per_mcu; - int m_max_blocks_per_row; - int m_mcus_per_row, m_mcus_per_col; - int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU]; - int m_total_lines_left; // total # lines left in image - int m_mcu_lines_left; // total # lines left in this MCU - int m_real_dest_bytes_per_scan_line; - int m_dest_bytes_per_scan_line; // rounded up - int m_dest_bytes_per_pixel; // 4 (RGB) or 1 (Y) - huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES]; - coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS]; - coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS]; - int m_eob_run; - int m_block_y_mcu[JPGD_MAX_COMPONENTS]; - uint8* m_pIn_buf_ofs; - int m_in_buf_left; - int m_tem_flag; - bool m_eof_flag; - uint8 m_in_buf_pad_start[128]; - uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128]; - uint8 m_in_buf_pad_end[128]; - int m_bits_left; - uint m_bit_buf; - int m_restart_interval; - int m_restarts_left; - int m_next_restart_num; - int m_max_mcus_per_row; - int m_max_blocks_per_mcu; - int m_expanded_blocks_per_mcu; - int m_expanded_blocks_per_row; - int m_expanded_blocks_per_component; - bool m_freq_domain_chroma_upsample; - int m_max_mcus_per_col; - uint m_last_dc_val[JPGD_MAX_COMPONENTS]; - jpgd_block_t* m_pMCU_coefficients; - int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU]; - uint8* m_pSample_buf; - int m_crr[256]; - int m_cbb[256]; - int m_crg[256]; - int m_cbg[256]; - uint8* m_pScan_line_0; - uint8* m_pScan_line_1; - jpgd_status m_error_code; - bool m_ready_flag; - int m_total_bytes_read; - - void free_all_blocks(); - // BEGIN EPIC MOD - UE_NORETURN void stop_decoding(jpgd_status status); - // END EPIC MOD - void *alloc(size_t n, bool zero = false); - void word_clear(void *p, uint16 c, uint n); - void prep_in_buffer(); - void read_dht_marker(); - void read_dqt_marker(); - void read_sof_marker(); - void skip_variable_marker(); - void read_dri_marker(); - void read_sos_marker(); - int next_marker(); - int process_markers(); - void locate_soi_marker(); - void locate_sof_marker(); - int locate_sos_marker(); - void init(jpeg_decoder_stream * pStream); - void create_look_ups(); - void fix_in_buffer(); - void transform_mcu(int mcu_row); - void transform_mcu_expand(int mcu_row); - coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y); - inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y); - void load_next_row(); - void decode_next_row(); - void make_huff_table(int index, huff_tables *pH); - void check_quant_tables(); - void check_huff_tables(); - void calc_mcu_block_order(); - int init_scan(); - void init_frame(); - void process_restart(); - void decode_scan(pDecode_block_func decode_block_func); - void init_progressive(); - void init_sequential(); - void decode_start(); - void decode_init(jpeg_decoder_stream * pStream); - void H2V2Convert(); - void H2V1Convert(); - void H1V2Convert(); - void H1V1Convert(); - void gray_convert(); - void expanded_convert(); - void find_eoi(); - inline uint get_char(); - inline uint get_char(bool *pPadding_flag); - inline void stuff_char(uint8 q); - inline uint8 get_octet(); - inline uint get_bits(int num_bits); - inline uint get_bits_no_markers(int numbits); - inline int huff_decode(huff_tables *pH); - inline int huff_decode(huff_tables *pH, int& extrabits); - static inline uint8 clamp(int i); - static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void 
decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y); - static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y); - }; - -} // namespace jpgd - -#endif // JPEG_DECODER_H diff --git a/spaces/Cran-May/yugangVI/app.py b/spaces/Cran-May/yugangVI/app.py deleted file mode 100644 index ba645a0a1a98621ec34f1f995d3c3a64ca386918..0000000000000000000000000000000000000000 --- a/spaces/Cran-May/yugangVI/app.py +++ /dev/null @@ -1,250 +0,0 @@ -from typing import Iterator - -import gradio as gr - - -from model import run - -DEFAULT_SYSTEM_PROMPT = "" -MAX_MAX_NEW_TOKENS = 2048 -DEFAULT_MAX_NEW_TOKENS = 1024 -MAX_INPUT_TOKEN_LENGTH = 4000 - -DESCRIPTION = """ -# yugangVI-Chat -""" -LICENSE="" - - - -def clear_and_save_textbox(message: str) -> tuple[str, str]: - return '', message - - -def display_input(message: str, - history: list[tuple[str, str]]) -> list[tuple[str, str]]: - history.append((message, '')) - return history - - -def delete_prev_fn( - history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]: - try: - message, _ = history.pop() - except IndexError: - message = '' - return history, message or '' - - -def generate( - message: str, - history_with_input: list[tuple[str, str]], - system_prompt: str, - max_new_tokens: int, - temperature: float, - top_p: float, - top_k: int, -) -> Iterator[list[tuple[str, str]]]: - - history = history_with_input[:-1] - generator = run(message, history, system_prompt, max_new_tokens, temperature, top_p, top_k) - for response in generator: - yield history + [(message, response)] - - -def process_example(message: str) -> tuple[str, list[tuple[str, str]]]: - generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 8192, 1, 0.95, 50) - for x in generator: - pass - return '', x - - -def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None: - a = 1 - - -with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - gr.DuplicateButton(value='Duplicate Space for private use', - elem_id='duplicate-button') - - with gr.Group(): - chatbot = gr.Chatbot(label='Chatbot') - with gr.Row(): - textbox = gr.Textbox( - container=False, - show_label=False, - placeholder='Type a message...', - scale=10, - ) - submit_button = gr.Button('Submit', - variant='primary', - scale=1, - min_width=0) - with gr.Row(): - retry_button = gr.Button('🔄 Retry', variant='secondary') - undo_button = gr.Button('↩️ Undo', variant='secondary') - clear_button = gr.Button('🗑️ Clear', variant='secondary') - - saved_input = gr.State() - - with gr.Accordion(label='Advanced options', open=False): - system_prompt = gr.Textbox(label='System prompt', - value=DEFAULT_SYSTEM_PROMPT, - lines=6) - max_new_tokens = gr.Slider( - label='Max new tokens', - minimum=1, - maximum=MAX_MAX_NEW_TOKENS, - step=1, - value=DEFAULT_MAX_NEW_TOKENS, - ) - temperature = gr.Slider( - label='Temperature', - minimum=0.1, - maximum=4.0, - step=0.1, - value=0.3, - ) - top_p = gr.Slider( - label='Top-p (nucleus sampling)', - minimum=0.05, - maximum=1.0, - step=0.05, - value=0.85, - ) - top_k = gr.Slider( - label='Top-k', - minimum=1, - maximum=1000, - step=1, - value=5, - ) - - gr.Examples( - examples=[ - '世界上第二高的山峰是哪座', - - ], - inputs=textbox, - outputs=[textbox, chatbot], - fn=process_example, - cache_examples=True, - ) - - gr.Markdown(LICENSE) - - textbox.submit( - fn=clear_and_save_textbox, - inputs=textbox, - outputs=[textbox, saved_input], - api_name=False, - queue=False, - ).then( - 
fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).then( - fn=check_input_token_length, - inputs=[saved_input, chatbot, system_prompt], - api_name=False, - queue=False, - ).success( - fn=generate, - inputs=[ - saved_input, - chatbot, - system_prompt, - max_new_tokens, - temperature, - top_p, - top_k, - ], - outputs=chatbot, - api_name=False, - ) - - button_event_preprocess = submit_button.click( - fn=clear_and_save_textbox, - inputs=textbox, - outputs=[textbox, saved_input], - api_name=False, - queue=False, - ).then( - fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).then( - fn=check_input_token_length, - inputs=[saved_input, chatbot, system_prompt], - api_name=False, - queue=False, - ).success( - fn=generate, - inputs=[ - saved_input, - chatbot, - system_prompt, - max_new_tokens, - temperature, - top_p, - top_k, - ], - outputs=chatbot, - api_name=False, - ) - - retry_button.click( - fn=delete_prev_fn, - inputs=chatbot, - outputs=[chatbot, saved_input], - api_name=False, - queue=False, - ).then( - fn=display_input, - inputs=[saved_input, chatbot], - outputs=chatbot, - api_name=False, - queue=False, - ).then( - fn=generate, - inputs=[ - saved_input, - chatbot, - system_prompt, - max_new_tokens, - temperature, - top_p, - top_k, - ], - outputs=chatbot, - api_name=False, - ) - - undo_button.click( - - fn=delete_prev_fn, - inputs=chatbot, - outputs=[chatbot, saved_input], - api_name=False, - queue=False, - ).then( - fn=lambda x: x, - inputs=[saved_input], - outputs=textbox, - api_name=False, - queue=False, - ) - - clear_button.click( - fn=lambda: ([], ''), - outputs=[chatbot, saved_input], - queue=False, - api_name=False, - ) - -demo.queue(max_size=20).launch() diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/runners/__init__.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/runners/__init__.py deleted file mode 100644 index 8ffe5b0b10e013fb6d69eb6879b1e42c06d5b447..0000000000000000000000000000000000000000 --- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/runners/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -from video_llama.runners.runner_base import RunnerBase - -__all__ = ["RunnerBase"] diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/xmlWriter.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/xmlWriter.py deleted file mode 100644 index 9a8dc3e3b7fe5eb13ea4b7ea369ced1da5555471..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/xmlWriter.py +++ /dev/null @@ -1,204 +0,0 @@ -"""xmlWriter.py -- Simple XML authoring class""" - -from fontTools.misc.textTools import byteord, strjoin, tobytes, tostr -import sys -import os -import string - -INDENT = " " - - -class XMLWriter(object): - def __init__( - self, - fileOrPath, - indentwhite=INDENT, - idlefunc=None, - encoding="utf_8", - newlinestr="\n", - ): - if encoding.lower().replace("-", "").replace("_", "") != "utf8": - raise Exception("Only UTF-8 encoding is supported.") - if fileOrPath == "-": - fileOrPath = sys.stdout - if not hasattr(fileOrPath, "write"): - self.filename = fileOrPath - self.file = open(fileOrPath, "wb") - self._closeStream = True - else: - self.filename = None - # assume writable file object - self.file = fileOrPath - self._closeStream = False - - # Figure out if writer expects bytes or unicodes - try: - # The bytes check should be first. See: - # https://github.com/fonttools/fonttools/pull/233 - self.file.write(b"") - self.totype = tobytes - except TypeError: - # This better not fail. - self.file.write("") - self.totype = tostr - self.indentwhite = self.totype(indentwhite) - if newlinestr is None: - self.newlinestr = self.totype(os.linesep) - else: - self.newlinestr = self.totype(newlinestr) - self.indentlevel = 0 - self.stack = [] - self.needindent = 1 - self.idlefunc = idlefunc - self.idlecounter = 0 - self._writeraw('') - self.newline() - - def __enter__(self): - return self - - def __exit__(self, exception_type, exception_value, traceback): - self.close() - - def close(self): - if self._closeStream: - self.file.close() - - def write(self, string, indent=True): - """Writes text.""" - self._writeraw(escape(string), indent=indent) - - def writecdata(self, string): - """Writes text in a CDATA section.""" - self._writeraw("") - - def write8bit(self, data, strip=False): - """Writes a bytes() sequence into the XML, escaping - non-ASCII bytes. 
When this is read in xmlReader, - the original bytes can be recovered by encoding to - 'latin-1'.""" - self._writeraw(escape8bit(data), strip=strip) - - def write_noindent(self, string): - """Writes text without indentation.""" - self._writeraw(escape(string), indent=False) - - def _writeraw(self, data, indent=True, strip=False): - """Writes bytes, possibly indented.""" - if indent and self.needindent: - self.file.write(self.indentlevel * self.indentwhite) - self.needindent = 0 - s = self.totype(data, encoding="utf_8") - if strip: - s = s.strip() - self.file.write(s) - - def newline(self): - self.file.write(self.newlinestr) - self.needindent = 1 - idlecounter = self.idlecounter - if not idlecounter % 100 and self.idlefunc is not None: - self.idlefunc() - self.idlecounter = idlecounter + 1 - - def comment(self, data): - data = escape(data) - lines = data.split("\n") - self._writeraw("") - - def simpletag(self, _TAG_, *args, **kwargs): - attrdata = self.stringifyattrs(*args, **kwargs) - data = "<%s%s/>" % (_TAG_, attrdata) - self._writeraw(data) - - def begintag(self, _TAG_, *args, **kwargs): - attrdata = self.stringifyattrs(*args, **kwargs) - data = "<%s%s>" % (_TAG_, attrdata) - self._writeraw(data) - self.stack.append(_TAG_) - self.indent() - - def endtag(self, _TAG_): - assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" - del self.stack[-1] - self.dedent() - data = "" % _TAG_ - self._writeraw(data) - - def dumphex(self, data): - linelength = 16 - hexlinelength = linelength * 2 - chunksize = 8 - for i in range(0, len(data), linelength): - hexline = hexStr(data[i : i + linelength]) - line = "" - white = "" - for j in range(0, hexlinelength, chunksize): - line = line + white + hexline[j : j + chunksize] - white = " " - self._writeraw(line) - self.newline() - - def indent(self): - self.indentlevel = self.indentlevel + 1 - - def dedent(self): - assert self.indentlevel > 0 - self.indentlevel = self.indentlevel - 1 - - def stringifyattrs(self, *args, **kwargs): - if kwargs: - assert not args - attributes = sorted(kwargs.items()) - elif args: - assert len(args) == 1 - attributes = args[0] - else: - return "" - data = "" - for attr, value in attributes: - if not isinstance(value, (bytes, str)): - value = str(value) - data = data + ' %s="%s"' % (attr, escapeattr(value)) - return data - - -def escape(data): - data = tostr(data, "utf_8") - data = data.replace("&", "&") - data = data.replace("<", "<") - data = data.replace(">", ">") - data = data.replace("\r", " ") - return data - - -def escapeattr(data): - data = escape(data) - data = data.replace('"', """) - return data - - -def escape8bit(data): - """Input is Unicode string.""" - - def escapechar(c): - n = ord(c) - if 32 <= n <= 127 and c not in "<&>": - return c - else: - return "&#" + repr(n) + ";" - - return strjoin(map(escapechar, data.decode("latin-1"))) - - -def hexStr(s): - h = string.hexdigits - r = "" - for c in s: - i = byteord(c) - r = r + h[(i >> 4) & 0xF] + h[i & 0xF] - return r diff --git a/spaces/Detomo/aisatsu-api/README.md b/spaces/Detomo/aisatsu-api/README.md deleted file mode 100644 index 5f008b3d18c2e092aebbbd319a4e1b09e221c88b..0000000000000000000000000000000000000000 --- a/spaces/Detomo/aisatsu-api/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Aisatsu Api -emoji: 🏢 -colorFrom: yellow -colorTo: yellow -sdk: docker -pinned: false -license: creativeml-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/Dinoking/Guccio-AI-Designer/models/biggan/pytorch_biggan/setup.py b/spaces/Dinoking/Guccio-AI-Designer/models/biggan/pytorch_biggan/setup.py deleted file mode 100644 index a34318b6b66f1ca7b15342dea3c23eb904974d6d..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/biggan/pytorch_biggan/setup.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py - -To create the package for pypi. - -1. Change the version in __init__.py and setup.py. - -2. Commit these changes with the message: "Release: VERSION" - -3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' " - Push the tag to git: git push --tags origin master - -4. Build both the sources and the wheel. Do not change anything in setup.py between - creating the wheel and the source distribution (obviously). - - For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory. - (this will build a wheel for the python version you use to build it - make sure you use python 3.x). - - For the sources, run: "python setup.py sdist" - You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp. - -5. Check that everything looks correct by uploading the package to the pypi test server: - - twine upload dist/* -r pypitest - (pypi suggest using twine as other methods upload files via plaintext.) - - Check that you can install it in a virtualenv by running: - pip install -i https://testpypi.python.org/pypi allennlp - -6. Upload the final version to actual pypi: - twine upload dist/* -r pypi - -7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. - -""" -from io import open -from setuptools import find_packages, setup - -setup( - name="pytorch_pretrained_biggan", - version="0.1.0", - author="Thomas Wolf", - author_email="thomas@huggingface.co", - description="PyTorch version of DeepMind's BigGAN model with pre-trained models", - long_description=open("README.md", "r", encoding='utf-8').read(), - long_description_content_type="text/markdown", - keywords='BIGGAN GAN deep learning google deepmind', - license='Apache', - url="https://github.com/huggingface/pytorch-pretrained-BigGAN", - packages=find_packages(exclude=["*.tests", "*.tests.*", - "tests.*", "tests"]), - install_requires=['torch>=0.4.1', - 'numpy', - 'boto3', - 'requests', - 'tqdm'], - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - "pytorch_pretrained_biggan=pytorch_pretrained_biggan.convert_tf_to_pytorch:main", - ] - }, - classifiers=[ - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python :: 3', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - ], -) diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/acesummarize.py b/spaces/Dinoking/Guccio-AI-Designer/netdissect/acesummarize.py deleted file mode 100644 index 345129245b461f44ef58538f02a08c3684d33f31..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/acesummarize.py +++ /dev/null @@ -1,62 +0,0 @@ -import os, sys, numpy, torch, argparse, skimage, json, shutil -from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas -from matplotlib.figure import Figure -from matplotlib.ticker import MaxNLocator -import matplotlib - -def main(): - parser = argparse.ArgumentParser(description='ACE optimization utility', - 
prog='python -m netdissect.aceoptimize') - parser.add_argument('--classname', type=str, default=None, - help='intervention classname') - parser.add_argument('--layer', type=str, default='layer4', - help='layer name') - parser.add_argument('--l2_lambda', type=float, nargs='+', - help='l2 regularizer hyperparameter') - parser.add_argument('--outdir', type=str, default=None, - help='dissection directory') - parser.add_argument('--variant', type=str, default=None, - help='experiment variant') - args = parser.parse_args() - - if args.variant is None: - args.variant = 'ace' - - run_command(args) - -def run_command(args): - fig = Figure(figsize=(4.5,3.5)) - FigureCanvas(fig) - ax = fig.add_subplot(111) - for l2_lambda in args.l2_lambda: - variant = args.variant - if l2_lambda != 0.01: - variant += '_reg%g' % l2_lambda - - dirname = os.path.join(args.outdir, args.layer, variant, args.classname) - snapshots = os.path.join(dirname, 'snapshots') - try: - dat = [torch.load(os.path.join(snapshots, 'epoch-%d.pth' % i)) - for i in range(10)] - except: - print('Missing %s snapshots' % dirname) - return - print('reg %g' % l2_lambda) - for i in range(10): - print(i, dat[i]['avg_loss'], - len((dat[i]['ablation'] == 1).nonzero())) - - ax.plot([dat[i]['avg_loss'] for i in range(10)], - label='reg %g' % l2_lambda) - ax.set_title('%s %s' % (args.classname, args.variant)) - ax.grid(True) - ax.legend() - ax.set_ylabel('Loss') - ax.set_xlabel('Epochs') - fig.tight_layout() - dirname = os.path.join(args.outdir, args.layer, - args.variant, args.classname) - fig.savefig(os.path.join(dirname, 'loss-plot.png')) - -if __name__ == '__main__': - main() diff --git a/spaces/DrHakase/full-body-anime-gan/app.py b/spaces/DrHakase/full-body-anime-gan/app.py deleted file mode 100644 index 3f354feb89dacdc21a2d482da5d6a4a828374d60..0000000000000000000000000000000000000000 --- a/spaces/DrHakase/full-body-anime-gan/app.py +++ /dev/null @@ -1,385 +0,0 @@ -import random -import gradio as gr -import imageio -import numpy as np -import onnx -import onnxruntime as rt -import huggingface_hub -from numpy.random import RandomState -from skimage import transform - - -def get_inter(r1, r2): - h_inter = max(min(r1[3], r2[3]) - max(r1[1], r2[1]), 0) - w_inter = max(min(r1[2], r2[2]) - max(r1[0], r2[0]), 0) - return h_inter * w_inter - - -def iou(r1, r2): - s1 = (r1[2] - r1[0]) * (r1[3] - r1[1]) - s2 = (r2[2] - r2[0]) * (r2[3] - r2[1]) - i = get_inter(r1, r2) - return i / (s1 + s2 - i) - - -def letterbox(im, new_shape=(640, 640), color=(0.5, 0.5, 0.5), stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = im.shape[:2] # current shape [height, width] - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - - # Compute padding - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape != new_unpad: # resize - im = transform.resize(im, (new_unpad[1], new_unpad[0])) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - - im_new = np.full((new_unpad[1] + top + bottom, new_unpad[0] + left + right, 3), color, dtype=np.float32) - im_new[top:new_unpad[1] + top, left:new_unpad[0] + left] = im - return im_new - - -def nms(pred, conf_thres, iou_thres, max_instance=20): # pred (anchor_num, 5 + cls_num) - nc = 
pred.shape[1] - 5 - candidates = [list() for x in range(nc)] - for x in pred: - if x[4] < conf_thres: - continue - cls = np.argmax(x[5:]) - p = x[4] * x[5 + cls] - if conf_thres <= p: - box = (x[0] - x[2] / 2, x[1] - x[3] / 2, x[0] + x[2] / 2, x[1] + x[3] / 2) # xywh2xyxy - candidates[cls].append([p, box]) - result = [list() for x in range(nc)] - for i, candidate in enumerate(candidates): - candidate = sorted(candidate, key=lambda a: a[0], reverse=True) - candidate = candidate[:max_instance] - for x in candidate: - ok = True - for r in result[i]: - if iou(r[1], x[1]) > iou_thres: - ok = False - break - if ok: - result[i].append(x) - - return result - - -class Model: - def __init__(self): - self.detector = None - self.encoder = None - self.g_synthesis = None - self.g_mapping = None - self.detector_stride = None - self.detector_imgsz = None - self.detector_class_names = None - self.anime_seg = None - self.w_avg = None - self.load_models() - - def load_models(self): - g_mapping_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "g_mapping.onnx") - g_synthesis_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "g_synthesis.onnx") - encoder_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "encoder.onnx") - detector_path = huggingface_hub.hf_hub_download("skytnt/fbanime-gan", "waifu_dect.onnx") - anime_seg_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - - providers = ['CPUExecutionProvider'] - gpu_providers = ['CUDAExecutionProvider'] - g_mapping = onnx.load(g_mapping_path) - w_avg = [x for x in g_mapping.graph.initializer if x.name == "w_avg"][0] - w_avg = np.frombuffer(w_avg.raw_data, dtype=np.float32)[np.newaxis, :] - w_avg = w_avg.repeat(16, axis=0)[np.newaxis, :] - self.w_avg = w_avg - self.g_mapping = rt.InferenceSession(g_mapping_path, providers=gpu_providers + providers) - self.g_synthesis = rt.InferenceSession(g_synthesis_path, providers=gpu_providers + providers) - self.encoder = rt.InferenceSession(encoder_path, providers=providers) - self.detector = rt.InferenceSession(detector_path, providers=providers) - detector_meta = self.detector.get_modelmeta().custom_metadata_map - self.detector_stride = int(detector_meta['stride']) - self.detector_imgsz = 1088 - self.detector_class_names = eval(detector_meta['names']) - self.anime_seg = rt.InferenceSession(anime_seg_path, providers=providers) - - def get_img(self, w, noise=0): - img = self.g_synthesis.run(None, {'w': w, "noise": np.asarray([noise], dtype=np.float32)})[0] - return (img.transpose(0, 2, 3, 1) * 127.5 + 128).clip(0, 255).astype(np.uint8)[0] - - def get_w(self, z, psi1, psi2): - return self.g_mapping.run(None, {'z': z, 'psi': np.asarray([psi1, psi2], dtype=np.float32)})[0] - - def remove_bg(self, img, s=1024): - img0 = img - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = transform.resize(img, (h, w)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = self.anime_seg.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = transform.resize(mask, (h0, w0)) - img0 = (img0 * mask + 255 * (1 - mask)).astype(np.uint8) - return img0 - - def encode_img(self, img): - img = transform.resize(((img / 255 - 0.5) / 0.5), (256, 
256)).transpose(2, 0, 1)[np.newaxis, :].astype( - np.float32) - return self.encoder.run(None, {'img': img})[0] + self.w_avg - - def detect(self, im0, conf_thres, iou_thres, detail=False): - if im0 is None: - return [] - img = letterbox((im0 / 255).astype(np.float32), (self.detector_imgsz, self.detector_imgsz), - stride=self.detector_stride) - # Convert - img = img.transpose(2, 0, 1) - img = img[np.newaxis, :] - pred = self.detector.run(None, {'images': img})[0][0] - dets = nms(pred, conf_thres, iou_thres) - imgs = [] - # Print results - s = '%gx%g ' % img.shape[2:] # print string - for i, det in enumerate(dets): - n = len(det) - s += f"{n} {self.detector_class_names[i]}{'s' * (n > 1)}, " # add to string - if detail: - print(s) - waifu_rects = [] - head_rects = [] - body_rects = [] - - for i, det in enumerate(dets): - for x in det: - # Rescale boxes from img_size to im0 size - wr = im0.shape[1] / img.shape[3] - hr = im0.shape[0] / img.shape[2] - x[1] = (int(x[1][0] * wr), int(x[1][1] * hr), - int(x[1][2] * wr), int(x[1][3] * hr)) - if i == 0: - head_rects.append(x[1]) - elif i == 1: - body_rects.append(x[1]) - elif i == 2: - waifu_rects.append(x[1]) - for j, waifu_rect in enumerate(waifu_rects): - msg = f'waifu {j + 1} ' - head_num = 0 - body_num = 0 - hr, br = None, None - for r in head_rects: - if get_inter(r, waifu_rect) / ((r[2] - r[0]) * (r[3] - r[1])) > 0.75: - hr = r - head_num += 1 - if head_num != 1: - if detail: - print(msg + f'head num error: {head_num}') - continue - for r in body_rects: - if get_inter(r, waifu_rect) / ((r[2] - r[0]) * (r[3] - r[1])) > 0.65: - br = r - body_num += 1 - if body_num != 1: - if detail: - print(msg + f'body num error: {body_num}') - continue - bounds = (min(waifu_rect[0], hr[0], br[0]), - min(waifu_rect[1], hr[1], br[1]), - max(waifu_rect[2], hr[2], br[2]), - max(waifu_rect[3], hr[3], br[3])) - if (bounds[2] - bounds[0]) / (bounds[3] - bounds[1]) > 0.7: - if detail: - print(msg + "ratio out of limit") - continue - expand_pixel = (bounds[3] - bounds[1]) // 20 - bounds = [max(bounds[0] - expand_pixel // 2, 0), - max(bounds[1] - expand_pixel, 0), - min(bounds[2] + expand_pixel // 2, im0.shape[1]), - min(bounds[3] + expand_pixel, im0.shape[0]), - ] - # corp and resize - w = bounds[2] - bounds[0] - h = bounds[3] - bounds[1] - bounds[3] += h % 2 - h += h % 2 - r = min(512 / w, 1024 / h) - pw, ph = int(512 / r - w), int(1024 / r - h) - bounds_tmp = (bounds[0] - pw // 2, bounds[1] - ph // 2, - bounds[2] + pw // 2 + pw % 2, bounds[3] + ph // 2 + ph % 2) - bounds = (max(0, bounds_tmp[0]), max(0, bounds_tmp[1]), - min(im0.shape[1], bounds_tmp[2]), min(im0.shape[0], bounds_tmp[3])) - dl = bounds[0] - bounds_tmp[0] - dr = bounds[2] - bounds_tmp[2] - dt = bounds[1] - bounds_tmp[1] - db = bounds[3] - bounds_tmp[3] - w = bounds_tmp[2] - bounds_tmp[0] - h = bounds_tmp[3] - bounds_tmp[1] - temp_img = np.full((h, w, 3), 255, dtype=np.uint8) - temp_img[dt:h + db, dl:w + dr] = im0[bounds[1]:bounds[3], bounds[0]:bounds[2]] - temp_img = transform.resize(temp_img, (1024, 512), preserve_range=True).astype(np.uint8) - imgs.append(temp_img) - return imgs - - # video 1-2 style - def gen_video(self, w1, w2, noise, path, frame_num=10): - video = imageio.get_writer(path, mode='I', fps=frame_num // 2, codec='libx264', bitrate='16M') - lin = np.linspace(0, 1, frame_num) - for i in range(0, frame_num): - img = self.get_img(((1 - lin[i]) * w1) + (lin[i] * w2), noise) - video.append_data(img) - video.close() - - - # video 1-2-1 style - def gen_video2(self, w1, w2, noise, path, 
frame_num=10): - video = imageio.get_writer(path, mode='I', fps=frame_num // 2, codec='libx264', bitrate='16M') - lin = np.linspace(0, 1, frame_num) - for i in range(0, frame_num): - img = self.get_img(((1 - lin[i]) * w1) + (lin[i] * w2), noise) - video.append_data(img) - for i in reversed(range(0, frame_num)): - img = self.get_img(((1 - lin[i]) * w1) + (lin[i] * w2), noise) - video.append_data(img) - video.close() - -def get_thumbnail(img): - img_new = np.full((256, 384, 3), 200, dtype=np.uint8) - img_new[:, 128:256] = transform.resize(img, (256, 128), preserve_range=True) - return img_new - - -def gen_fn(seed, random_seed, psi1, psi2, noise): - if random_seed: - seed = random.randint(0, 2 ** 32 - 1) - z = RandomState(int(seed)).randn(1, 1024) - w = model.get_w(z.astype(dtype=np.float32), psi1, psi2) - img_out = model.get_img(w, noise) - return img_out, seed, w, get_thumbnail(img_out) - - -def encode_img_fn(img, noise): - if img is None: - return "please upload a image", None, None, None, None - img = model.remove_bg(img) - imgs = model.detect(img, 0.2, 0.03) - if len(imgs) == 0: - return "failed to detect anime character", None, None, None, None - w = model.encode_img(imgs[0]) - img_out = model.get_img(w, noise) - return "success", imgs[0], img_out, w, get_thumbnail(img_out) - -def gen_video_fn(w1, w2, noise, frame): - if w1 is None or w2 is None: - return None -# model.gen_video(w1, w2, noise, "video.mp4", int(frame)) - model.gen_video2(w1, w2, noise, "video.mp4", int(frame)) - return "video.mp4" - - -if __name__ == '__main__': - model = Model() - - app = gr.Blocks() - with app: - gr.Markdown("# full-body anime GAN\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=o_ob.hf.full-body-anime-gan)\n" - "fork from [skytnt](https://huggingface.co/spaces/skytnt/full-body-anime-gan)\n\n" - "Image generation and blending using StyleGAN3 (not text2image, not Stable Diffusion)\n" - "psi1, psi2 are mapping parameters from nskytnt/fbanime-gan. 
The psi2 seems to have an effect on clothing, and the psi1 seems to have an effect on sexual styles such as breast enhancement [my experiment results](https://twitter.com/o_ob/status/1607860668543401984).\n" - "The video generation generates mp4 with the pattern 1→2→1 for easy comparison.\n\n" - "- StyleGAN3を使った画像生成とブレンドです(text2image, Stable Diffusionではありません)\n" - "- psi1,2は[nskytnt/fbanime-gan](https://github.com/SkyTNT/fbanimegan/tree/main/stylegan3)のmappingパラメータです。\n" - "- psi2は服に影響があり、psi1は胸の強調など性癖っぽいスタイルに影響があるようです([実験結果](https://twitter.com/o_ob/status/1607860668543401984))\n" - "- 動画生成は比較しやすいように 1→2→1 というパターンでmp4を生成します。\n") - with gr.Tabs(): - with gr.TabItem("generate image 新規画像生成"): - with gr.Row(): - with gr.Column(): - gr.Markdown("generate image") - with gr.Row(): - gen_input1 = gr.Slider(minimum=0, maximum=2 ** 32 - 1, step=1, value=0, label="seed") - gen_input2 = gr.Checkbox(label="Random", value=True) - gen_input3 = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="truncation psi 1") - gen_input4 = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="truncation psi 2") - gen_input5 = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="noise strength") - with gr.Group(): - gen_submit = gr.Button("Generate", variant="primary") - with gr.Column(): - gen_output1 = gr.Image(label="output image") - select_img_input_w1 = gr.Variable() - select_img_input_img1 = gr.Variable() - - with gr.TabItem("encode image 画像からエンコード"): - with gr.Row(): - with gr.Column(): - gr.Markdown("you'd better upload a standing full-body image 完全な立ち絵の画像をアップロードしてください") - encode_img_input = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 7)] - encode_img_examples = gr.Dataset(components=[encode_img_input], samples=examples_data) - with gr.Group(): - encode_img_submit = gr.Button("Run", variant="primary") - with gr.Column(): - encode_img_output1 = gr.Textbox(label="output message") - with gr.Row(): - encode_img_output2 = gr.Image(label="detected") - encode_img_output3 = gr.Image(label="encoded") - select_img_input_w2 = gr.Variable() - select_img_input_img2 = gr.Variable() - - with gr.TabItem("generate video ビデオ合成"): - with gr.Row(): - with gr.Column(): - gr.Markdown("generate video between 2 images 2つの画像からビデオを生成します") - with gr.Row(): - with gr.Column(): - select_img1_dropdown = gr.Radio(label="Select image 1", value="current generated image 現在の生成画像から", - choices=["current generated image", - "current encoded image"], type="index") - with gr.Group(): - select_img1_button = gr.Button("Select", variant="primary") - select_img1_output_img = gr.Image(label="selected image 1") - select_img1_output_w = gr.Variable() - with gr.Column(): - select_img2_dropdown = gr.Radio(label="Select image 2", value="current generated image 現在の生成画像から", - choices=["current generated image", - "current encoded image"], type="index") - with gr.Group(): - select_img2_button = gr.Button("Select", variant="primary") - select_img2_output_img = gr.Image(label="selected image 2") - select_img2_output_w = gr.Variable() - generate_video_frame = gr.Slider(minimum=10, maximum=30, step=1, label="frame", value=15) - with gr.Group(): - generate_video_button = gr.Button("Generate", variant="primary") - with gr.Column(): - generate_video_output = gr.Video(label="output video") - gen_submit.click(gen_fn, [gen_input1, gen_input2, gen_input3, gen_input4, gen_input5], - [gen_output1, gen_input1, select_img_input_w1, select_img_input_img1]) - encode_img_submit.click(encode_img_fn, 
[encode_img_input, gen_input5], - [encode_img_output1, encode_img_output2, encode_img_output3, select_img_input_w2, - select_img_input_img2]) - encode_img_examples.click(lambda x: x[0], [encode_img_examples], [encode_img_input]) - select_img1_button.click(lambda i, img1, img2, w1, w2: (img1, w1) if i == 0 else (img2, w2), - [select_img1_dropdown, select_img_input_img1, select_img_input_img2, - select_img_input_w1, select_img_input_w2], - [select_img1_output_img, select_img1_output_w]) - select_img2_button.click(lambda i, img1, img2, w1, w2: (img1, w1) if i == 0 else (img2, w2), - [select_img2_dropdown, select_img_input_img1, select_img_input_img2, - select_img_input_w1, select_img_input_w2], - [select_img2_output_img, select_img2_output_w]) - generate_video_button.click(gen_video_fn, - [select_img1_output_w, select_img2_output_w, gen_input5, generate_video_frame], - [generate_video_output]) - app.launch() diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/fused_bias_act.cpp b/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/fused_bias_act.cpp deleted file mode 100644 index 02be898f970bcc8ea297867fcaa4e71b24b3d949..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/op/fused_bias_act.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include - - -torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, - int act, int grad, float alpha, float scale); - -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, - int act, int grad, float alpha, float scale) { - CHECK_CUDA(input); - CHECK_CUDA(bias); - - return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); -} \ No newline at end of file diff --git a/spaces/Duskfallcrew/anything-v3.0/app.py b/spaces/Duskfallcrew/anything-v3.0/app.py deleted file mode 100644 index 99a6a3762d5e337f08e960c4a31b4ac2467bca49..0000000000000000000000000000000000000000 --- a/spaces/Duskfallcrew/anything-v3.0/app.py +++ /dev/null @@ -1,8 +0,0 @@ -import gradio as gr - -description = """
- -
- """ - -gr.Interface.load("models/Linaqruf/anything-v3.0", description=description).launch() \ No newline at end of file diff --git a/spaces/ECE1786-AG/ArtIstic-GENREator/README.md b/spaces/ECE1786-AG/ArtIstic-GENREator/README.md deleted file mode 100644 index e4851613da3333e46401d872c0923c6742ae6e80..0000000000000000000000000000000000000000 --- a/spaces/ECE1786-AG/ArtIstic-GENREator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ArtIstic GENREator -emoji: 📈 -colorFrom: indigo -colorTo: green -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Egrt/GCycleGAN/nets/cyclegan.py b/spaces/Egrt/GCycleGAN/nets/cyclegan.py deleted file mode 100644 index 7e507d1eaddda0e187700a3a7706e39a2a9c2b00..0000000000000000000000000000000000000000 --- a/spaces/Egrt/GCycleGAN/nets/cyclegan.py +++ /dev/null @@ -1,923 +0,0 @@ -# ----------------------------------------------------------------------------------- -# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257 -# Originally Written by Ze Liu, Modified by Jingyun Liang. -# ----------------------------------------------------------------------------------- - -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - r""" Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. 
Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. Default: 0.0 - """ - - def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim ** -0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """ - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = (q @ k.transpose(-2, -1)) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - def extra_repr(self) -> str: - return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' - - def flops(self, N): - # calculate flops for 1 window with token length of N - flops = 0 - # qkv = self.qkv(x) - flops += N * self.dim * 3 * self.dim - # attn = (q @ k.transpose(-2, -1)) - flops += self.num_heads * N * (self.dim // self.num_heads) * N - # x = (attn @ v) - flops += self.num_heads * N * N * (self.dim // self.num_heads) - # x = self.proj(x) - flops += N * self.dim * self.dim - return flops - - -class SwinTransformerBlock(nn.Module): - r""" Swin Transformer Block. - - Args: - dim (int): Number of input channels. 
- input_resolution (tuple[int]): Input resulotion. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., - act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - if min(self.input_resolution) <= self.window_size: - # if window size is larger than input resolution, we don't partition windows - self.shift_size = 0 - self.window_size = min(self.input_resolution) - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, - qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - if self.shift_size > 0: - attn_mask = self.calculate_mask(self.input_resolution) - else: - attn_mask = None - - self.register_buffer("attn_mask", attn_mask) - - def calculate_mask(self, x_size): - # calculate attention mask for SW-MSA - H, W = x_size - img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 - h_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - w_slices = (slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None)) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - - return attn_mask - - def forward(self, x, x_size): - H, W = x_size - B, L, C = x.shape - # assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_x = x - - # partition windows - x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA (to be compatible for 
testing on images whose shapes are the multiple of window size - if self.input_resolution == x_size: - attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C - else: - attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device)) - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - def extra_repr(self) -> str: - return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ - f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" - - def flops(self): - flops = 0 - H, W = self.input_resolution - # norm1 - flops += self.dim * H * W - # W-MSA/SW-MSA - nW = H * W / self.window_size / self.window_size - flops += nW * self.attn.flops(self.window_size * self.window_size) - # mlp - flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio - # norm2 - flops += self.dim * H * W - return flops - - -class PatchMerging(nn.Module): - r""" Patch Merging Layer. - - Args: - input_resolution (tuple[int]): Resolution of input feature. - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.input_resolution = input_resolution - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x): - """ - x: B, H*W, C - """ - H, W = self.input_resolution - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." - - x = x.view(B, H, W, C) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - def extra_repr(self) -> str: - return f"input_resolution={self.input_resolution}, dim={self.dim}" - - def flops(self): - H, W = self.input_resolution - flops = H * W * self.dim - flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim - return flops - - -class BasicLayer(nn.Module): - """ A basic Swin Transformer layer for one stage. - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__(self, dim, input_resolution, depth, num_heads, window_size, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): - - super().__init__() - self.dim = dim - self.input_resolution = input_resolution - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList([ - SwinTransformerBlock(dim=dim, input_resolution=input_resolution, - num_heads=num_heads, window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop, attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer) - for i in range(depth)]) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, x_size): - for blk in self.blocks: - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, x_size) - else: - x = blk(x, x_size) - if self.downsample is not None: - x = self.downsample(x) - return x - - def extra_repr(self) -> str: - return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" - - def flops(self): - flops = 0 - for blk in self.blocks: - flops += blk.flops() - if self.downsample is not None: - flops += self.downsample.flops() - return flops - - -class RSTB(nn.Module): - """Residual Swin Transformer Block (RSTB). - - Args: - dim (int): Number of input channels. - input_resolution (tuple[int]): Input resolution. - depth (int): Number of blocks. - num_heads (int): Number of attention heads. - window_size (int): Local window size. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - img_size: Input image size. - patch_size: Patch size. - resi_connection: The convolutional block before residual connection. 
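    Example (an illustrative shape check, not from the original file; the
    sizes below are assumptions chosen so that H and W are multiples of
    window_size):

        >>> import torch
        >>> rstb = RSTB(dim=60, input_resolution=(56, 56), depth=2,
        ...             num_heads=6, window_size=7)
        >>> x = torch.randn(1, 56 * 56, 60)   # (B, H*W, C) token sequence
        >>> out = rstb(x, (56, 56))           # x_size = (H, W)
        >>> out.shape
        torch.Size([1, 3136, 60])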
- """ - - def __init__(self, dim, input_resolution, depth, num_heads, window_size, - mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False, - img_size=224, patch_size=4, resi_connection='1conv'): - super(RSTB, self).__init__() - - self.dim = dim - self.input_resolution = input_resolution - - self.residual_group = BasicLayer(dim=dim, - input_resolution=input_resolution, - depth=depth, - num_heads=num_heads, - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop, attn_drop=attn_drop, - drop_path=drop_path, - norm_layer=norm_layer, - downsample=downsample, - use_checkpoint=use_checkpoint) - - if resi_connection == '1conv': - self.conv = nn.Conv2d(dim, dim, 3, 1, 1) - elif resi_connection == '3conv': - # to save parameters and memory - self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.GELU(), - nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), - nn.GELU(), - nn.Conv2d(dim // 4, dim, 3, 1, 1)) - - self.patch_embed = PatchEmbed( - img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, - norm_layer=None) - - self.patch_unembed = PatchUnEmbed( - img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, - norm_layer=None) - - def forward(self, x, x_size): - return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x - - def flops(self): - flops = 0 - flops += self.residual_group.flops() - H, W = self.input_resolution - flops += H * W * self.dim * self.dim * 9 - flops += self.patch_embed.flops() - flops += self.patch_unembed.flops() - - return flops - - -class PatchEmbed(nn.Module): - r""" Image to Patch Embedding - - Args: - img_size (int): Image size. Default: 224. - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] - self.img_size = img_size - self.patch_size = patch_size - self.patches_resolution = patches_resolution - self.num_patches = patches_resolution[0] * patches_resolution[1] - - self.in_chans = in_chans - self.embed_dim = embed_dim - - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - x = x.flatten(2).transpose(1, 2) # B Ph*Pw C - if self.norm is not None: - x = self.norm(x) - return x - - def flops(self): - flops = 0 - H, W = self.img_size - if self.norm is not None: - flops += H * W * self.embed_dim - return flops - - -class PatchUnEmbed(nn.Module): - r""" Image to Patch Unembedding - - Args: - img_size (int): Image size. Default: 224. - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. 
Default: None - """ - - def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] - self.img_size = img_size - self.patch_size = patch_size - self.patches_resolution = patches_resolution - self.num_patches = patches_resolution[0] * patches_resolution[1] - - self.in_chans = in_chans - self.embed_dim = embed_dim - - def forward(self, x, x_size): - B, HW, C = x.shape - x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C - return x - - def flops(self): - flops = 0 - return flops - - -class Upsample(nn.Sequential): - """Upsample module. - - Args: - scale (int): Scale factor. Supported scales: 2^n and 3. - num_feat (int): Channel number of intermediate features. - """ - - def __init__(self, scale, num_feat): - m = [] - if (scale & (scale - 1)) == 0: # scale = 2^n - for _ in range(int(math.log(scale, 2))): - m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(2)) - elif scale == 3: - m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1)) - m.append(nn.PixelShuffle(3)) - else: - raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') - super(Upsample, self).__init__(*m) - - -class UpsampleOneStep(nn.Sequential): - """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle) - Used in lightweight SR to save parameters. - - Args: - scale (int): Scale factor. Supported scales: 2^n and 3. - num_feat (int): Channel number of intermediate features. - - """ - - def __init__(self, scale, num_feat, num_out_ch, input_resolution=None): - self.num_feat = num_feat - self.input_resolution = input_resolution - m = [] - m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1)) - m.append(nn.PixelShuffle(scale)) - super(UpsampleOneStep, self).__init__(*m) - - def flops(self): - H, W = self.input_resolution - flops = H * W * self.num_feat * 3 * 9 - return flops - - -class Generator(nn.Module): - r""" SwinIR - A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer. - - Args: - img_size (int | tuple(int)): Input image size. Default 64 - patch_size (int | tuple(int)): Patch size. Default: 1 - in_chans (int): Number of input image channels. Default: 3 - embed_dim (int): Patch embedding dimension. Default: 96 - depths (tuple(int)): Depth of each Swin Transformer layer. - num_heads (tuple(int)): Number of attention heads in different layers. - window_size (int): Window size. Default: 7 - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None - drop_rate (float): Dropout rate. Default: 0 - attn_drop_rate (float): Attention dropout rate. Default: 0 - drop_path_rate (float): Stochastic depth rate. Default: 0.1 - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False - patch_norm (bool): If True, add normalization after patch embedding. Default: True - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False - upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction - img_range: Image range. 
1. or 255. - upsampler: The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None - resi_connection: The convolutional block before residual connection. '1conv'/'3conv' - """ - - def __init__(self, img_size=64, patch_size=1, in_chans=3, out_chans=3, - embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6], - window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, - norm_layer=nn.LayerNorm, ape=False, patch_norm=True, - use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv', - **kwargs): - super(Generator, self).__init__() - num_in_ch = in_chans - num_out_ch = out_chans - num_feat = 64 - self.img_range = img_range - if in_chans == 3: - rgb_mean = (0.4488, 0.4371, 0.4040) - self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1) - else: - self.mean = torch.zeros(1, 1, 1, 1) - self.upscale = upscale - self.upsampler = upsampler - self.window_size = window_size - # -------------浅层特征提取------------ # - self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1) - - # -------------深层特征提取------------ # - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.num_features = embed_dim - self.mlp_ratio = mlp_ratio - - # -------------将图片划分为不重叠的Patch------------ # - self.patch_embed = PatchEmbed( - img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - num_patches = self.patch_embed.num_patches - patches_resolution = self.patch_embed.patches_resolution - self.patches_resolution = patches_resolution - - # -------------将重叠的Patch进行融合------------ # - self.patch_unembed = PatchUnEmbed( - img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None) - - # -------------绝对位置编码------------ # - if self.ape: - self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) - trunc_normal_(self.absolute_pos_embed, std=.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule - - # build Residual Swin Transformer blocks (RSTB) - self.layers = nn.ModuleList() - for i_layer in range(self.num_layers): - layer = RSTB(dim=embed_dim, - input_resolution=(patches_resolution[0], - patches_resolution[1]), - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results - norm_layer=norm_layer, - downsample=None, - use_checkpoint=use_checkpoint, - img_size=img_size, - patch_size=patch_size, - resi_connection=resi_connection - - ) - self.layers.append(layer) - self.norm = norm_layer(self.num_features) - - # build the last conv layer in deep feature extraction - if resi_connection == '1conv': - self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1) - elif resi_connection == '3conv': - # to save parameters and memory - self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), - nn.GELU(), - nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), - nn.GELU(), - nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1)) - # -------------超分辨率重建模块------------ # - if self.upsampler == 'pixelshuffle': - # for classical SR - 
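            # features are projected from embed_dim to num_feat channels,
            # upscaled by Upsample (a 3x3 conv + PixelShuffle per 2x step, or a
            # single conv + PixelShuffle(3) for x3), then mapped to num_out_ch
            # by conv_last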
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), - nn.GELU()) - self.upsample = Upsample(upscale, num_feat) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - elif self.upsampler == 'pixelshuffledirect': - # for lightweight SR (to save parameters) - self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch, - (patches_resolution[0], patches_resolution[1])) - elif self.upsampler == 'nearest+conv': - # for real-world SR (less artifacts) - assert self.upscale == 4, 'only support x4 now.' - self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), - nn.GELU()) - self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - self.lrelu = nn.GELU() - else: - # for image denoising and JPEG compression artifact reduction - self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1) - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'absolute_pos_embed'} - - @torch.jit.ignore - def no_weight_decay_keywords(self): - return {'relative_position_bias_table'} - - def check_image_size(self, x): - _, _, h, w = x.size() - mod_pad_h = (self.window_size - h % self.window_size) % self.window_size - mod_pad_w = (self.window_size - w % self.window_size) % self.window_size - x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect') - return x - - def forward_features(self, x): - x_size = (x.shape[2], x.shape[3]) - x = self.patch_embed(x) - if self.ape: - x = x + self.absolute_pos_embed - x = self.pos_drop(x) - - for layer in self.layers: - x = layer(x, x_size) - - x = self.norm(x) # B L C - x = self.patch_unembed(x, x_size) - - return x - - def forward(self, x): - H, W = x.shape[2:] - x = self.check_image_size(x) - - self.mean = self.mean.type_as(x) - x = (x - self.mean) * self.img_range - - if self.upsampler == 'pixelshuffle': - # for classical SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.conv_before_upsample(x) - x = self.conv_last(self.upsample(x)) - elif self.upsampler == 'pixelshuffledirect': - # for lightweight SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.upsample(x) - elif self.upsampler == 'nearest+conv': - # for real-world SR - x = self.conv_first(x) - x = self.conv_after_body(self.forward_features(x)) + x - x = self.conv_before_upsample(x) - x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) - x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest'))) - x = self.conv_last(self.lrelu(self.conv_hr(x))) - else: - # for image denoising and JPEG compression artifact reduction - x_first = self.conv_first(x) - res = self.conv_after_body(self.forward_features(x_first)) + x_first - x = self.conv_last(res) - - x = x / self.img_range + self.mean - - return x[:, :, :H*self.upscale, :W*self.upscale] - - def flops(self): - flops = 0 - H, W = self.patches_resolution - flops += H * W * 3 * self.embed_dim * 9 - flops += self.patch_embed.flops() - for i, layer in 
enumerate(self.layers): - flops += layer.flops() - flops += H * W * 3 * self.embed_dim * self.embed_dim - flops += self.upsample.flops() - return flops - - -class Discriminator(nn.Module): - def __init__(self): - super(Discriminator, self).__init__() - self.net = nn.Sequential( - nn.Conv2d(3, 64, kernel_size=3, padding=1), - nn.GELU(), - - nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1), - nn.GELU(), - - nn.Conv2d(64, 128, kernel_size=3, padding=1), - nn.GELU(), - - nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1), - nn.GELU(), - - nn.Conv2d(128, 256, kernel_size=3, padding=1), - nn.GELU(), - - nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1), - nn.GELU(), - - nn.Conv2d(256, 512, kernel_size=3, padding=1), - nn.GELU(), - - nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1), - nn.GELU(), - - nn.AdaptiveAvgPool2d(1), - nn.Conv2d(512, 1024, kernel_size=1), - nn.GELU(), - nn.Conv2d(1024, 1, kernel_size=1) - ) - - def forward(self, x): - batch_size = x.size(0) - return self.net(x).view(batch_size) - -def compute_gradient_penalty(D, real_samples, fake_samples): - alpha = torch.randn(real_samples.size(0), 1, 1, 1) - if torch.cuda.is_available(): - alpha = alpha.cuda() - - interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True) - d_interpolates = D(interpolates) - fake = torch.ones(d_interpolates.size()) - if torch.cuda.is_available(): - fake = fake.cuda() - - gradients = torch.autograd.grad( - outputs=d_interpolates, - inputs=interpolates, - grad_outputs=fake, - create_graph=True, - retain_graph=True, - only_inputs=True, - )[0] - gradients = gradients.view(gradients.size(0), -1) - gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() - return gradient_penalty - -if __name__ == '__main__': - upscale = 1 - window_size = 7 - height = (110 // upscale // window_size + 1) * window_size - width = (110 // upscale // window_size + 1) * window_size - model = Generator(upscale=upscale, img_size=(height, width), - window_size=window_size, img_range=1., depths=[6, 6, 6, 6], - embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=4, upsampler='nearest+conv') - print(model) - # print(height, width, model.flops() / 1e9) - - x = torch.randn((1, 3, height, width)) - x = model(x) - print(x.shape) diff --git a/spaces/FSDL-Fashion/fashion_img_search/fis/feature_extraction/pipeline/base.py b/spaces/FSDL-Fashion/fashion_img_search/fis/feature_extraction/pipeline/base.py deleted file mode 100644 index 43adc6a82090ab9eb2ddfb5894612b8d7e10f63d..0000000000000000000000000000000000000000 --- a/spaces/FSDL-Fashion/fashion_img_search/fis/feature_extraction/pipeline/base.py +++ /dev/null @@ -1,85 +0,0 @@ -from typing import Any, List - -import numpy as np -import torch -from PIL import Image -from PIL.Image import Image as Img - -from fis.feature_extraction.detection.base import BaseDetector -from fis.feature_extraction.embedding.base import BaseEncoder - - -class EncodingPipeline: - """Apply the detection and embedding models to an image.""" - - def __init__(self, name: str, detection_model: BaseDetector, embedding_model: BaseEncoder) -> None: - """Initialize the encoding pipeline. - - Args: - name: Name of the pipeline. - detection_model: Model used to detect the fashion items in the images. - embedding_model: Model used to generate embeddings for each detected item. 
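        Example (hypothetical usage sketch; ``SomeDetector`` and ``SomeEncoder``
        stand in for concrete BaseDetector / BaseEncoder implementations and
        are not part of this module):

            >>> pipeline = EncodingPipeline(
            ...     name="fashion",
            ...     detection_model=SomeDetector(),
            ...     embedding_model=SomeEncoder(),
            ... )
            >>> embeddings = pipeline.encode("path/to/image.jpg")
            >>> len(embeddings)  # one embedding per detected item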
- """ - self._name = name - self._detection_model = detection_model - self._embedding_model = embedding_model - - def encode(self, image: str) -> List[torch.Tensor]: - """Encode each item from an image into a embedding. - - Args: - image: path to the image. - - Returns: - Embeddings for each detected item in the image. - """ - image = self._load_images(image) - bboxes = self._detection_model(image) - items = self._crop_images(image, bboxes) - - embeddings = [] - for item in items: - embedding = self._embedding_model(item) - embeddings.append(embedding) - - return embeddings - - def _load_images(self, image: Any) -> Img: - """Read an image from disk. - - Args: - image: Path to the image on disk. - - Raises: - TypeError: if the type of image is incorrect. - - Returns: - PIL Image. - """ - if isinstance(image, Img): - pass - elif isinstance(image, np.ndarray): - image = Image.fromarray(image) - elif isinstance(image, str): - image = Image.open(image) - else: - raise TypeError(f"Unknown type for image: {type(image)}") - - return image - - def _crop_images(self, image, bboxes) -> List[Img]: - """Crop an image based on bounding boxes. - - Args: - image: Image to crop items from. - bboxes: Bounding box containing an item. - - Returns: - List of cropped images. - """ - items = [] - for bbox in bboxes: - cropped_image = image.crop(bbox) - items.append(cropped_image) - - return items diff --git a/spaces/FaceOnLive/ID-Document-Recognition-SDK/run.sh b/spaces/FaceOnLive/ID-Document-Recognition-SDK/run.sh deleted file mode 100644 index 3e4c338600a6dffbf30a728cf33544300c42112a..0000000000000000000000000000000000000000 --- a/spaces/FaceOnLive/ID-Document-Recognition-SDK/run.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -exec ./ocrengine/ttvocrsrv & -exec python3 app.py & -exec python3 demo.py \ No newline at end of file diff --git a/spaces/Felladrin/LaMini-Flan-T5-248M-Candle-Wasm/utils.js b/spaces/Felladrin/LaMini-Flan-T5-248M-Candle-Wasm/utils.js deleted file mode 100644 index 0985c653fde4b392114f026527a736ad5c9bf42c..0000000000000000000000000000000000000000 --- a/spaces/Felladrin/LaMini-Flan-T5-248M-Candle-Wasm/utils.js +++ /dev/null @@ -1,174 +0,0 @@ -export async function extractEmbeddings( - worker, - weightsURL, - tokenizerURL, - configURL, - modelID, - sentences, - updateStatus, - normalize_embeddings = true -) { - return new Promise((resolve, reject) => { - worker.postMessage({ - weightsURL, - tokenizerURL, - configURL, - modelID, - sentences, - normalize_embeddings, - }); - function messageHandler(event) { - if ("error" in event.data) { - worker.removeEventListener("message", messageHandler); - reject(new Error(event.data.error)); - } - if (event.data.status === "complete") { - worker.removeEventListener("message", messageHandler); - resolve(event.data); - } - if (updateStatus) updateStatus(event.data); - } - worker.addEventListener("message", messageHandler); - }); -} - -export async function generateText( - worker, - weightsURL, - tokenizerURL, - configURL, - modelID, - prompt, - params, - updateStatus -) { - return new Promise((resolve, reject) => { - worker.postMessage({ - weightsURL, - tokenizerURL, - configURL, - modelID, - prompt, - params, - }); - function messageHandler(event) { - if ("error" in event.data) { - worker.removeEventListener("message", messageHandler); - reject(new Error(event.data.error)); - } - if (event.data.status === "complete") { - worker.removeEventListener("message", messageHandler); - resolve(event.data); - } - if (updateStatus) updateStatus(event.data); - } - 
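    // Register the handler: every worker message is forwarded to updateStatus
    // (if provided); a payload with an "error" field rejects the promise and a
    // status of "complete" resolves it, removing the listener in both terminal
    // cases.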
worker.addEventListener("message", messageHandler); - }); -} - -const tasks = { - translation_en_to_de: { - prefix: "translate English to German: ", - max_length: 300, - }, - translation_en_to_fr: { - prefix: "translate English to French: ", - max_length: 300, - }, - translation_en_to_ro: { - prefix: "translate English to Romanian: ", - max_length: 300, - }, - summarization: { prefix: "summarize: ", max_length: 200 }, - fluency: { - prefix: "Fix the grammar: ", - max_length: 300, - }, - coherence: { - prefix: "Rewrite to make this easier to understand: ", - max_length: 300, - }, - simplification: { - prefix: "translate English to Romanian: ", - max_length: 300, - }, - simplification: { - prefix: "Paraphrase this: ", - max_length: 300, - }, - formalization: { - prefix: "Write this more formally: ", - max_length: 300, - }, - neutralize: { - prefix: "Write in a more neutral way: ", - max_length: 300, - }, -}; - -export const MODELS = { - "LaMini-Flan-T5-248M-candle-q2k.gguf": { - size: "81.4 MB", - base_url: - "https://huggingface.co/Felladrin/candle-quantized-LaMini-Flan-T5-248M/resolve/main/", - model: "LaMini-Flan-T5-248M-candle-q2k.gguf", - tokenizer: "tokenizer.json", - config: "config.json", - tasks, - }, - "LaMini-Flan-T5-248M-candle-q3k.gguf": { - size: "106.6 MB", - base_url: - "https://huggingface.co/Felladrin/candle-quantized-LaMini-Flan-T5-248M/resolve/main/", - model: "LaMini-Flan-T5-248M-candle-q3k.gguf", - tokenizer: "tokenizer.json", - config: "config.json", - tasks, - }, - "LaMini-Flan-T5-248M-candle-q4k.gguf": { - size: "139.5 MB", - base_url: - "https://huggingface.co/Felladrin/candle-quantized-LaMini-Flan-T5-248M/resolve/main/", - model: "LaMini-Flan-T5-248M-candle-q4k.gguf", - tokenizer: "tokenizer.json", - config: "config.json", - tasks, - }, - "LaMini-Flan-T5-248M-candle-q5k.gguf": { - size: "170.4 MB", - base_url: - "https://huggingface.co/Felladrin/candle-quantized-LaMini-Flan-T5-248M/resolve/main/", - model: "LaMini-Flan-T5-248M-candle-q5k.gguf", - tokenizer: "tokenizer.json", - config: "config.json", - tasks, - }, - "LaMini-Flan-T5-248M-candle-q6k.gguf": { - size: "203.3 MB", - base_url: - "https://huggingface.co/Felladrin/candle-quantized-LaMini-Flan-T5-248M/resolve/main/", - model: "LaMini-Flan-T5-248M-candle-q6k.gguf", - tokenizer: "tokenizer.json", - config: "config.json", - tasks, - }, - "LaMini-Flan-T5-248M-candle-q8k.gguf": { - size: "282.6 MB", - base_url: - "https://huggingface.co/Felladrin/candle-quantized-LaMini-Flan-T5-248M/resolve/main/", - model: "LaMini-Flan-T5-248M-candle-q8k.gguf", - tokenizer: "tokenizer.json", - config: "config.json", - tasks, - }, -}; - -export function getModelInfo(id, taskID) { - const model = MODELS[id]; - return { - modelURL: model.base_url + model.model, - configURL: model.base_url + model.config, - tokenizerURL: model.base_url + model.tokenizer, - maxLength: model.tasks[taskID].max_length, - }; -} diff --git a/spaces/Froleptan/stablediffusion-infinity/PyPatchMatch/examples/cpp_example.cpp b/spaces/Froleptan/stablediffusion-infinity/PyPatchMatch/examples/cpp_example.cpp deleted file mode 100644 index 8f3ff5c52783cf1e9d6ec6a8a6017f857c372c0e..0000000000000000000000000000000000000000 --- a/spaces/Froleptan/stablediffusion-infinity/PyPatchMatch/examples/cpp_example.cpp +++ /dev/null @@ -1,31 +0,0 @@ -#include -#include -#include - -#include "masked_image.h" -#include "nnf.h" -#include "inpaint.h" - -int main() { - auto source = cv::imread("./images/forest_pruned.bmp", cv::IMREAD_COLOR); - - auto mask = cv::Mat(source.size(), 
CV_8UC1); - mask = cv::Scalar::all(0); - for (int i = 0; i < source.size().height; ++i) { - for (int j = 0; j < source.size().width; ++j) { - auto source_ptr = source.ptr(i, j); - if (source_ptr[0] == 255 && source_ptr[1] == 255 && source_ptr[2] == 255) { - mask.at(i, j) = 1; - } - } - } - - auto metric = PatchSSDDistanceMetric(3); - auto result = Inpainting(source, mask, &metric).run(true, true); - // cv::imwrite("./images/forest_recovered.bmp", result); - // cv::imshow("Result", result); - // cv::waitKey(); - - return 0; -} - diff --git a/spaces/Ggxcc4566/stabilityai-stable-diffusion-xl-refiner-1.0/app.py b/spaces/Ggxcc4566/stabilityai-stable-diffusion-xl-refiner-1.0/app.py deleted file mode 100644 index f0854c17140255ac783638680bc5dce595cc9fd0..0000000000000000000000000000000000000000 --- a/spaces/Ggxcc4566/stabilityai-stable-diffusion-xl-refiner-1.0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-xl-refiner-1.0").launch() \ No newline at end of file diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/magnify.py b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/magnify.py deleted file mode 100644 index 96060b1e27355e15d8b432cec4b65f2ef6975c50..0000000000000000000000000000000000000000 --- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/magnify.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -# file: test.py -# time: 05/12/2022 -# author: yangheng -# github: https://github.com/yangheng95 -# huggingface: https://huggingface.co/yangheng -# google scholar: https://scholar.google.com/citations?user=NPq5a_0AAAAJ&hl=en -# Copyright (C) 2021. All Rights Reserved. -from pathlib import Path -from typing import Union - -import autocuda -import findfile -from pyabsa.utils.pyabsa_utils import fprint -from torchvision import transforms -from .utils.prepare_images import * -from .Models import * - - -class ImageMagnifier: - def __init__(self): - self.device = autocuda.auto_cuda() - self.model_cran_v2 = CARN_V2( - color_channels=3, - mid_channels=64, - conv=nn.Conv2d, - single_conv_size=3, - single_conv_group=1, - scale=2, - activation=nn.LeakyReLU(0.1), - SEBlock=True, - repeat_blocks=3, - atrous=(1, 1, 1), - ) - - self.model_cran_v2 = network_to_half(self.model_cran_v2) - self.checkpoint = findfile.find_cwd_file("CARN_model_checkpoint.pt") - self.model_cran_v2.load_state_dict( - torch.load(self.checkpoint, map_location="cpu") - ) - # if use GPU, then comment out the next line so it can use fp16. 
- self.model_cran_v2 = self.model_cran_v2.float().to(self.device) - self.model_cran_v2.to(self.device) - - def __image_scale(self, img, scale_factor: int = 2): - img_splitter = ImageSplitter( - seg_size=64, scale_factor=scale_factor, boarder_pad_size=3 - ) - img_patches = img_splitter.split_img_tensor(img, scale_method=None, img_pad=0) - with torch.no_grad(): - if self.device != "cpu": - with torch.cuda.amp.autocast(): - out = [self.model_cran_v2(i.to(self.device)) for i in img_patches] - else: - with torch.cpu.amp.autocast(): - out = [self.model_cran_v2(i) for i in img_patches] - img_upscale = img_splitter.merge_img_tensor(out) - - final = torch.cat([img_upscale]) - - return transforms.ToPILImage()(final[0]) - - def magnify(self, img, scale_factor: int = 2): - fprint("scale factor reset to:", scale_factor // 2 * 2) - _scale_factor = scale_factor - while _scale_factor // 2 > 0: - img = self.__image_scale(img, scale_factor=2) - _scale_factor = _scale_factor // 2 - return img - - def magnify_from_file( - self, img_path: Union[str, Path], scale_factor: int = 2, save_img: bool = True - ): - - if not os.path.exists(img_path): - raise FileNotFoundError("Path is not found.") - if os.path.isfile(img_path): - try: - img = Image.open(img_path) - img = self.magnify(img, scale_factor) - if save_img: - img.save(os.path.join(img_path)) - except Exception as e: - fprint(img_path, e) - fprint(img_path, "Done.") - - elif os.path.isdir(img_path): - for path in os.listdir(img_path): - try: - img = Image.open(os.path.join(img_path, path)) - img = self.magnify(img, scale_factor) - if save_img: - img.save(os.path.join(img_path, path)) - except Exception as e: - fprint(path, e) - continue - fprint(path, "Done.") - else: - raise TypeError("Path is not a file or directory.") diff --git a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/__init__.py b/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/__init__.py deleted file mode 100644 index 9821d212c3c7781e601ea8d2137493942d0937d4..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2021 DeepMind Technologies Limited -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Data pipeline for model features.""" diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/paa/paa_r101_fpn_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/paa/paa_r101_fpn_2x_coco.py deleted file mode 100644 index 641ef764d2713184845b624b20db1771cfcd6739..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/paa/paa_r101_fpn_2x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './paa_r101_fpn_1x_coco.py' -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/mask/structures.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/mask/structures.py deleted file mode 100644 index d9ec5775f281ab8b76cb873e71a4edd9969ab905..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/mask/structures.py +++ /dev/null @@ -1,1024 +0,0 @@ -from abc import ABCMeta, abstractmethod - -import cv2 -import mmcv -import numpy as np -import pycocotools.mask as maskUtils -import torch -from mmcv.ops.roi_align import roi_align - - -class BaseInstanceMasks(metaclass=ABCMeta): - """Base class for instance masks.""" - - @abstractmethod - def rescale(self, scale, interpolation='nearest'): - """Rescale masks as large as possible while keeping the aspect ratio. - For details can refer to `mmcv.imrescale`. - - Args: - scale (tuple[int]): The maximum size (h, w) of rescaled mask. - interpolation (str): Same as :func:`mmcv.imrescale`. - - Returns: - BaseInstanceMasks: The rescaled masks. - """ - - @abstractmethod - def resize(self, out_shape, interpolation='nearest'): - """Resize masks to the given out_shape. - - Args: - out_shape: Target (h, w) of resized mask. - interpolation (str): See :func:`mmcv.imresize`. - - Returns: - BaseInstanceMasks: The resized masks. - """ - - @abstractmethod - def flip(self, flip_direction='horizontal'): - """Flip masks alone the given direction. - - Args: - flip_direction (str): Either 'horizontal' or 'vertical'. - - Returns: - BaseInstanceMasks: The flipped masks. - """ - - @abstractmethod - def pad(self, out_shape, pad_val): - """Pad masks to the given size of (h, w). - - Args: - out_shape (tuple[int]): Target (h, w) of padded mask. - pad_val (int): The padded value. - - Returns: - BaseInstanceMasks: The padded masks. - """ - - @abstractmethod - def crop(self, bbox): - """Crop each mask by the given bbox. - - Args: - bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). - - Return: - BaseInstanceMasks: The cropped masks. - """ - - @abstractmethod - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device, - interpolation='bilinear'): - """Crop and resize masks by the given bboxes. - - This function is mainly used in mask targets computation. - It firstly align mask to bboxes by assigned_inds, then crop mask by the - assigned bbox and resize to the size of (mask_h, mask_w) - - Args: - bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) - out_shape (tuple[int]): Target (h, w) of resized mask - inds (ndarray): Indexes to assign masks to each bbox, - shape (N,) and values should be between [0, num_masks - 1]. - device (str): Device of bboxes - interpolation (str): See `mmcv.imresize` - - Return: - BaseInstanceMasks: the cropped and resized masks. 
- """ - - @abstractmethod - def expand(self, expanded_h, expanded_w, top, left): - """see :class:`Expand`.""" - - @property - @abstractmethod - def areas(self): - """ndarray: areas of each instance.""" - - @abstractmethod - def to_ndarray(self): - """Convert masks to the format of ndarray. - - Return: - ndarray: Converted masks in the format of ndarray. - """ - - @abstractmethod - def to_tensor(self, dtype, device): - """Convert masks to the format of Tensor. - - Args: - dtype (str): Dtype of converted mask. - device (torch.device): Device of converted masks. - - Returns: - Tensor: Converted masks in the format of Tensor. - """ - - @abstractmethod - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=0, - interpolation='bilinear'): - """Translate the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - offset (int | float): The offset for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - fill_val (int | float): Border value. Default 0. - interpolation (str): Same as :func:`mmcv.imtranslate`. - - Returns: - Translated masks. - """ - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Shear the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - magnitude (int | float): The magnitude used for shear. - direction (str): The shear direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. Default 0. - interpolation (str): Same as in :func:`mmcv.imshear`. - - Returns: - ndarray: Sheared masks. - """ - - @abstractmethod - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """Rotate the masks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - angle (int | float): Rotation angle in degrees. Positive values - mean counter-clockwise rotation. - center (tuple[float], optional): Center point (w, h) of the - rotation in source image. If not specified, the center of - the image will be used. - scale (int | float): Isotropic scale factor. - fill_val (int | float): Border value. Default 0 for masks. - - Returns: - Rotated masks. - """ - - -class BitmapMasks(BaseInstanceMasks): - """This class represents masks in the form of bitmaps. - - Args: - masks (ndarray): ndarray of masks in shape (N, H, W), where N is - the number of objects. - height (int): height of masks - width (int): width of masks - - Example: - >>> from mmdet.core.mask.structures import * # NOQA - >>> num_masks, H, W = 3, 32, 32 - >>> rng = np.random.RandomState(0) - >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int) - >>> self = BitmapMasks(masks, height=H, width=W) - - >>> # demo crop_and_resize - >>> num_boxes = 5 - >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) - >>> out_shape = (14, 14) - >>> inds = torch.randint(0, len(self), size=(num_boxes,)) - >>> device = 'cpu' - >>> interpolation = 'bilinear' - >>> new = self.crop_and_resize( - ... 
bboxes, out_shape, inds, device, interpolation) - >>> assert len(new) == num_boxes - >>> assert new.height, new.width == out_shape - """ - - def __init__(self, masks, height, width): - self.height = height - self.width = width - if len(masks) == 0: - self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) - else: - assert isinstance(masks, (list, np.ndarray)) - if isinstance(masks, list): - assert isinstance(masks[0], np.ndarray) - assert masks[0].ndim == 2 # (H, W) - else: - assert masks.ndim == 3 # (N, H, W) - - self.masks = np.stack(masks).reshape(-1, height, width) - assert self.masks.shape[1] == self.height - assert self.masks.shape[2] == self.width - - def __getitem__(self, index): - """Index the BitmapMask. - - Args: - index (int | ndarray): Indices in the format of integer or ndarray. - - Returns: - :obj:`BitmapMasks`: Indexed bitmap masks. - """ - masks = self.masks[index].reshape(-1, self.height, self.width) - return BitmapMasks(masks, self.height, self.width) - - def __iter__(self): - return iter(self.masks) - - def __repr__(self): - s = self.__class__.__name__ + '(' - s += f'num_masks={len(self.masks)}, ' - s += f'height={self.height}, ' - s += f'width={self.width})' - return s - - def __len__(self): - """Number of masks.""" - return len(self.masks) - - def rescale(self, scale, interpolation='nearest'): - """See :func:`BaseInstanceMasks.rescale`.""" - if len(self.masks) == 0: - new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) - rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) - else: - rescaled_masks = np.stack([ - mmcv.imrescale(mask, scale, interpolation=interpolation) - for mask in self.masks - ]) - height, width = rescaled_masks.shape[1:] - return BitmapMasks(rescaled_masks, height, width) - - def resize(self, out_shape, interpolation='nearest'): - """See :func:`BaseInstanceMasks.resize`.""" - if len(self.masks) == 0: - resized_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - resized_masks = np.stack([ - mmcv.imresize( - mask, out_shape[::-1], interpolation=interpolation) - for mask in self.masks - ]) - return BitmapMasks(resized_masks, *out_shape) - - def flip(self, flip_direction='horizontal'): - """See :func:`BaseInstanceMasks.flip`.""" - assert flip_direction in ('horizontal', 'vertical', 'diagonal') - - if len(self.masks) == 0: - flipped_masks = self.masks - else: - flipped_masks = np.stack([ - mmcv.imflip(mask, direction=flip_direction) - for mask in self.masks - ]) - return BitmapMasks(flipped_masks, self.height, self.width) - - def pad(self, out_shape, pad_val=0): - """See :func:`BaseInstanceMasks.pad`.""" - if len(self.masks) == 0: - padded_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - padded_masks = np.stack([ - mmcv.impad(mask, shape=out_shape, pad_val=pad_val) - for mask in self.masks - ]) - return BitmapMasks(padded_masks, *out_shape) - - def crop(self, bbox): - """See :func:`BaseInstanceMasks.crop`.""" - assert isinstance(bbox, np.ndarray) - assert bbox.ndim == 1 - - # clip the boundary - bbox = bbox.copy() - bbox[0::2] = np.clip(bbox[0::2], 0, self.width) - bbox[1::2] = np.clip(bbox[1::2], 0, self.height) - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - - if len(self.masks) == 0: - cropped_masks = np.empty((0, h, w), dtype=np.uint8) - else: - cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] - return BitmapMasks(cropped_masks, h, w) - - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device='cpu', - interpolation='bilinear'): - """See 
:func:`BaseInstanceMasks.crop_and_resize`.""" - if len(self.masks) == 0: - empty_masks = np.empty((0, *out_shape), dtype=np.uint8) - return BitmapMasks(empty_masks, *out_shape) - - # convert bboxes to tensor - if isinstance(bboxes, np.ndarray): - bboxes = torch.from_numpy(bboxes).to(device=device) - if isinstance(inds, np.ndarray): - inds = torch.from_numpy(inds).to(device=device) - - num_bbox = bboxes.shape[0] - fake_inds = torch.arange( - num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] - rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 - rois = rois.to(device=device) - if num_bbox > 0: - gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( - 0, inds).to(dtype=rois.dtype) - targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, - 1.0, 0, 'avg', True).squeeze(1) - resized_masks = (targets >= 0.5).cpu().numpy() - else: - resized_masks = [] - return BitmapMasks(resized_masks, *out_shape) - - def expand(self, expanded_h, expanded_w, top, left): - """See :func:`BaseInstanceMasks.expand`.""" - if len(self.masks) == 0: - expanded_mask = np.empty((0, expanded_h, expanded_w), - dtype=np.uint8) - else: - expanded_mask = np.zeros((len(self), expanded_h, expanded_w), - dtype=np.uint8) - expanded_mask[:, top:top + self.height, - left:left + self.width] = self.masks - return BitmapMasks(expanded_mask, expanded_h, expanded_w) - - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=0, - interpolation='bilinear'): - """Translate the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - offset (int | float): The offset for translate. - direction (str): The translate direction, either "horizontal" - or "vertical". - fill_val (int | float): Border value. Default 0 for masks. - interpolation (str): Same as :func:`mmcv.imtranslate`. - - Returns: - BitmapMasks: Translated BitmapMasks. - - Example: - >>> from mmdet.core.mask.structures import BitmapMasks - >>> self = BitmapMasks.random(dtype=np.uint8) - >>> out_shape = (32, 32) - >>> offset = 4 - >>> direction = 'horizontal' - >>> fill_val = 0 - >>> interpolation = 'bilinear' - >>> # Note, There seem to be issues when: - >>> # * out_shape is different than self's shape - >>> # * the mask dtype is not supported by cv2.AffineWarp - >>> new = self.translate(out_shape, offset, direction, fill_val, - >>> interpolation) - >>> assert len(new) == len(self) - >>> assert new.height, new.width == out_shape - """ - if len(self.masks) == 0: - translated_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - translated_masks = mmcv.imtranslate( - self.masks.transpose((1, 2, 0)), - offset, - direction, - border_value=fill_val, - interpolation=interpolation) - if translated_masks.ndim == 2: - translated_masks = translated_masks[:, :, None] - translated_masks = translated_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(translated_masks, *out_shape) - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """Shear the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - magnitude (int | float): The magnitude used for shear. - direction (str): The shear direction, either "horizontal" - or "vertical". - border_value (int | tuple[int]): Value used in case of a - constant border. - interpolation (str): Same as in :func:`mmcv.imshear`. - - Returns: - BitmapMasks: The sheared masks. 
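        Example (an illustrative sketch, not from the original file; the sizes
        and magnitude are arbitrary assumptions):

            >>> from mmdet.core.mask.structures import BitmapMasks
            >>> self = BitmapMasks.random(num_masks=2, height=32, width=32)
            >>> new = self.shear((32, 32), magnitude=0.5)
            >>> assert len(new) == 2
            >>> assert (new.height, new.width) == (32, 32)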
- """ - if len(self.masks) == 0: - sheared_masks = np.empty((0, *out_shape), dtype=np.uint8) - else: - sheared_masks = mmcv.imshear( - self.masks.transpose((1, 2, 0)), - magnitude, - direction, - border_value=border_value, - interpolation=interpolation) - if sheared_masks.ndim == 2: - sheared_masks = sheared_masks[:, :, None] - sheared_masks = sheared_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(sheared_masks, *out_shape) - - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """Rotate the BitmapMasks. - - Args: - out_shape (tuple[int]): Shape for output mask, format (h, w). - angle (int | float): Rotation angle in degrees. Positive values - mean counter-clockwise rotation. - center (tuple[float], optional): Center point (w, h) of the - rotation in source image. If not specified, the center of - the image will be used. - scale (int | float): Isotropic scale factor. - fill_val (int | float): Border value. Default 0 for masks. - - Returns: - BitmapMasks: Rotated BitmapMasks. - """ - if len(self.masks) == 0: - rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) - else: - rotated_masks = mmcv.imrotate( - self.masks.transpose((1, 2, 0)), - angle, - center=center, - scale=scale, - border_value=fill_val) - if rotated_masks.ndim == 2: - # case when only one mask, (h, w) - rotated_masks = rotated_masks[:, :, None] # (h, w, 1) - rotated_masks = rotated_masks.transpose( - (2, 0, 1)).astype(self.masks.dtype) - return BitmapMasks(rotated_masks, *out_shape) - - @property - def areas(self): - """See :py:attr:`BaseInstanceMasks.areas`.""" - return self.masks.sum((1, 2)) - - def to_ndarray(self): - """See :func:`BaseInstanceMasks.to_ndarray`.""" - return self.masks - - def to_tensor(self, dtype, device): - """See :func:`BaseInstanceMasks.to_tensor`.""" - return torch.tensor(self.masks, dtype=dtype, device=device) - - @classmethod - def random(cls, - num_masks=3, - height=32, - width=32, - dtype=np.uint8, - rng=None): - """Generate random bitmap masks for demo / testing purposes. - - Example: - >>> from mmdet.core.mask.structures import BitmapMasks - >>> self = BitmapMasks.random() - >>> print('self = {}'.format(self)) - self = BitmapMasks(num_masks=3, height=32, width=32) - """ - from mmdet.utils.util_random import ensure_rng - rng = ensure_rng(rng) - masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) - self = cls(masks, height=height, width=width) - return self - - -class PolygonMasks(BaseInstanceMasks): - """This class represents masks in the form of polygons. - - Polygons is a list of three levels. 
The first level of the list - corresponds to objects, the second level to the polys that compose the - object, the third level to the poly coordinates - - Args: - masks (list[list[ndarray]]): The first level of the list - corresponds to objects, the second level to the polys that - compose the object, the third level to the poly coordinates - height (int): height of masks - width (int): width of masks - - Example: - >>> from mmdet.core.mask.structures import * # NOQA - >>> masks = [ - >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] - >>> ] - >>> height, width = 16, 16 - >>> self = PolygonMasks(masks, height, width) - - >>> # demo translate - >>> new = self.translate((16, 16), 4., direction='horizontal') - >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) - >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) - - >>> # demo crop_and_resize - >>> num_boxes = 3 - >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) - >>> out_shape = (16, 16) - >>> inds = torch.randint(0, len(self), size=(num_boxes,)) - >>> device = 'cpu' - >>> interpolation = 'bilinear' - >>> new = self.crop_and_resize( - ... bboxes, out_shape, inds, device, interpolation) - >>> assert len(new) == num_boxes - >>> assert new.height, new.width == out_shape - """ - - def __init__(self, masks, height, width): - assert isinstance(masks, list) - if len(masks) > 0: - assert isinstance(masks[0], list) - assert isinstance(masks[0][0], np.ndarray) - - self.height = height - self.width = width - self.masks = masks - - def __getitem__(self, index): - """Index the polygon masks. - - Args: - index (ndarray | List): The indices. - - Returns: - :obj:`PolygonMasks`: The indexed polygon masks. - """ - if isinstance(index, np.ndarray): - index = index.tolist() - if isinstance(index, list): - masks = [self.masks[i] for i in index] - else: - try: - masks = self.masks[index] - except Exception: - raise ValueError( - f'Unsupported input of type {type(index)} for indexing!') - if len(masks) and isinstance(masks[0], np.ndarray): - masks = [masks] # ensure a list of three levels - return PolygonMasks(masks, self.height, self.width) - - def __iter__(self): - return iter(self.masks) - - def __repr__(self): - s = self.__class__.__name__ + '(' - s += f'num_masks={len(self.masks)}, ' - s += f'height={self.height}, ' - s += f'width={self.width})' - return s - - def __len__(self): - """Number of masks.""" - return len(self.masks) - - def rescale(self, scale, interpolation=None): - """see :func:`BaseInstanceMasks.rescale`""" - new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) - if len(self.masks) == 0: - rescaled_masks = PolygonMasks([], new_h, new_w) - else: - rescaled_masks = self.resize((new_h, new_w)) - return rescaled_masks - - def resize(self, out_shape, interpolation=None): - """see :func:`BaseInstanceMasks.resize`""" - if len(self.masks) == 0: - resized_masks = PolygonMasks([], *out_shape) - else: - h_scale = out_shape[0] / self.height - w_scale = out_shape[1] / self.width - resized_masks = [] - for poly_per_obj in self.masks: - resized_poly = [] - for p in poly_per_obj: - p = p.copy() - p[0::2] *= w_scale - p[1::2] *= h_scale - resized_poly.append(p) - resized_masks.append(resized_poly) - resized_masks = PolygonMasks(resized_masks, *out_shape) - return resized_masks - - def flip(self, flip_direction='horizontal'): - """see :func:`BaseInstanceMasks.flip`""" - assert flip_direction in ('horizontal', 'vertical', 'diagonal') - if len(self.masks) == 0: - flipped_masks = PolygonMasks([], self.height, 
self.width) - else: - flipped_masks = [] - for poly_per_obj in self.masks: - flipped_poly_per_obj = [] - for p in poly_per_obj: - p = p.copy() - if flip_direction == 'horizontal': - p[0::2] = self.width - p[0::2] - elif flip_direction == 'vertical': - p[1::2] = self.height - p[1::2] - else: - p[0::2] = self.width - p[0::2] - p[1::2] = self.height - p[1::2] - flipped_poly_per_obj.append(p) - flipped_masks.append(flipped_poly_per_obj) - flipped_masks = PolygonMasks(flipped_masks, self.height, - self.width) - return flipped_masks - - def crop(self, bbox): - """see :func:`BaseInstanceMasks.crop`""" - assert isinstance(bbox, np.ndarray) - assert bbox.ndim == 1 - - # clip the boundary - bbox = bbox.copy() - bbox[0::2] = np.clip(bbox[0::2], 0, self.width) - bbox[1::2] = np.clip(bbox[1::2], 0, self.height) - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - - if len(self.masks) == 0: - cropped_masks = PolygonMasks([], h, w) - else: - cropped_masks = [] - for poly_per_obj in self.masks: - cropped_poly_per_obj = [] - for p in poly_per_obj: - # pycocotools will clip the boundary - p = p.copy() - p[0::2] -= bbox[0] - p[1::2] -= bbox[1] - cropped_poly_per_obj.append(p) - cropped_masks.append(cropped_poly_per_obj) - cropped_masks = PolygonMasks(cropped_masks, h, w) - return cropped_masks - - def pad(self, out_shape, pad_val=0): - """padding has no effect on polygons`""" - return PolygonMasks(self.masks, *out_shape) - - def expand(self, *args, **kwargs): - """TODO: Add expand for polygon""" - raise NotImplementedError - - def crop_and_resize(self, - bboxes, - out_shape, - inds, - device='cpu', - interpolation='bilinear'): - """see :func:`BaseInstanceMasks.crop_and_resize`""" - out_h, out_w = out_shape - if len(self.masks) == 0: - return PolygonMasks([], out_h, out_w) - - resized_masks = [] - for i in range(len(bboxes)): - mask = self.masks[inds[i]] - bbox = bboxes[i, :] - x1, y1, x2, y2 = bbox - w = np.maximum(x2 - x1, 1) - h = np.maximum(y2 - y1, 1) - h_scale = out_h / max(h, 0.1) # avoid too large scale - w_scale = out_w / max(w, 0.1) - - resized_mask = [] - for p in mask: - p = p.copy() - # crop - # pycocotools will clip the boundary - p[0::2] -= bbox[0] - p[1::2] -= bbox[1] - - # resize - p[0::2] *= w_scale - p[1::2] *= h_scale - resized_mask.append(p) - resized_masks.append(resized_mask) - return PolygonMasks(resized_masks, *out_shape) - - def translate(self, - out_shape, - offset, - direction='horizontal', - fill_val=None, - interpolation=None): - """Translate the PolygonMasks. - - Example: - >>> self = PolygonMasks.random(dtype=np.int) - >>> out_shape = (self.height, self.width) - >>> new = self.translate(out_shape, 4., direction='horizontal') - >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2]) - >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501 - """ - assert fill_val is None or fill_val == 0, 'Here fill_val is not '\ - f'used, and defaultly should be None or 0. got {fill_val}.' 
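        # each polygon is copied and its x (horizontal) or y (vertical)
        # coordinates are shifted by ``offset`` and clipped to the output
        # shape; no border filling is needed, which is why ``fill_val`` is
        # unused for polygon masks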
- if len(self.masks) == 0: - translated_masks = PolygonMasks([], *out_shape) - else: - translated_masks = [] - for poly_per_obj in self.masks: - translated_poly_per_obj = [] - for p in poly_per_obj: - p = p.copy() - if direction == 'horizontal': - p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) - elif direction == 'vertical': - p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) - translated_poly_per_obj.append(p) - translated_masks.append(translated_poly_per_obj) - translated_masks = PolygonMasks(translated_masks, *out_shape) - return translated_masks - - def shear(self, - out_shape, - magnitude, - direction='horizontal', - border_value=0, - interpolation='bilinear'): - """See :func:`BaseInstanceMasks.shear`.""" - if len(self.masks) == 0: - sheared_masks = PolygonMasks([], *out_shape) - else: - sheared_masks = [] - if direction == 'horizontal': - shear_matrix = np.stack([[1, magnitude], - [0, 1]]).astype(np.float32) - elif direction == 'vertical': - shear_matrix = np.stack([[1, 0], [magnitude, - 1]]).astype(np.float32) - for poly_per_obj in self.masks: - sheared_poly = [] - for p in poly_per_obj: - p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n] - new_coords = np.matmul(shear_matrix, p) # [2, n] - new_coords[0, :] = np.clip(new_coords[0, :], 0, - out_shape[1]) - new_coords[1, :] = np.clip(new_coords[1, :], 0, - out_shape[0]) - sheared_poly.append( - new_coords.transpose((1, 0)).reshape(-1)) - sheared_masks.append(sheared_poly) - sheared_masks = PolygonMasks(sheared_masks, *out_shape) - return sheared_masks - - def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): - """See :func:`BaseInstanceMasks.rotate`.""" - if len(self.masks) == 0: - rotated_masks = PolygonMasks([], *out_shape) - else: - rotated_masks = [] - rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale) - for poly_per_obj in self.masks: - rotated_poly = [] - for p in poly_per_obj: - p = p.copy() - coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2] - # pad 1 to convert from format [x, y] to homogeneous - # coordinates format [x, y, 1] - coords = np.concatenate( - (coords, np.ones((coords.shape[0], 1), coords.dtype)), - axis=1) # [n, 3] - rotated_coords = np.matmul( - rotate_matrix[None, :, :], - coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2] - rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0, - out_shape[1]) - rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0, - out_shape[0]) - rotated_poly.append(rotated_coords.reshape(-1)) - rotated_masks.append(rotated_poly) - rotated_masks = PolygonMasks(rotated_masks, *out_shape) - return rotated_masks - - def to_bitmap(self): - """convert polygon masks to bitmap masks.""" - bitmap_masks = self.to_ndarray() - return BitmapMasks(bitmap_masks, self.height, self.width) - - @property - def areas(self): - """Compute areas of masks. - - This func is modified from `detectron2 - `_. - The function only works with Polygons using the shoelace formula. - - Return: - ndarray: areas of each instance - """ # noqa: W501 - area = [] - for polygons_per_obj in self.masks: - area_per_obj = 0 - for p in polygons_per_obj: - area_per_obj += self._polygon_area(p[0::2], p[1::2]) - area.append(area_per_obj) - return np.asarray(area) - - def _polygon_area(self, x, y): - """Compute the area of a component of a polygon. 
- - Using the shoelace formula: - https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates - - Args: - x (ndarray): x coordinates of the component - y (ndarray): y coordinates of the component - - Return: - float: the are of the component - """ # noqa: 501 - return 0.5 * np.abs( - np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) - - def to_ndarray(self): - """Convert masks to the format of ndarray.""" - if len(self.masks) == 0: - return np.empty((0, self.height, self.width), dtype=np.uint8) - bitmap_masks = [] - for poly_per_obj in self.masks: - bitmap_masks.append( - polygon_to_bitmap(poly_per_obj, self.height, self.width)) - return np.stack(bitmap_masks) - - def to_tensor(self, dtype, device): - """See :func:`BaseInstanceMasks.to_tensor`.""" - if len(self.masks) == 0: - return torch.empty((0, self.height, self.width), - dtype=dtype, - device=device) - ndarray_masks = self.to_ndarray() - return torch.tensor(ndarray_masks, dtype=dtype, device=device) - - @classmethod - def random(cls, - num_masks=3, - height=32, - width=32, - n_verts=5, - dtype=np.float32, - rng=None): - """Generate random polygon masks for demo / testing purposes. - - Adapted from [1]_ - - References: - .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501 - - Example: - >>> from mmdet.core.mask.structures import PolygonMasks - >>> self = PolygonMasks.random() - >>> print('self = {}'.format(self)) - """ - from mmdet.utils.util_random import ensure_rng - rng = ensure_rng(rng) - - def _gen_polygon(n, irregularity, spikeyness): - """Creates the polygon by sampling points on a circle around the - centre. Random noise is added by varying the angular spacing - between sequential points, and by varying the radial distance of - each point from the centre. - - Based on original code by Mike Ounsworth - - Args: - n (int): number of vertices - irregularity (float): [0,1] indicating how much variance there - is in the angular spacing of vertices. [0,1] will map to - [0, 2pi/numberOfVerts] - spikeyness (float): [0,1] indicating how much variance there is - in each vertex from the circle of radius aveRadius. [0,1] - will map to [0, aveRadius] - - Returns: - a list of vertices, in CCW order. 
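            Note (added annotation, not in the original docstring): the angle
            steps drawn below are renormalised by
            ``k = angle_steps.sum() / (2 * pi)`` so that the n steps wrap
            exactly once around the circle, and the per-vertex radii are drawn
            from a normal distribution truncated to ``[0, 2 * radius]`` with
            mean ``radius`` and std ``spikeyness``, which is why larger
            ``spikeyness`` values produce more jagged polygons.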
- """ - from scipy.stats import truncnorm - # Generate around the unit circle - cx, cy = (0.0, 0.0) - radius = 1 - - tau = np.pi * 2 - - irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n - spikeyness = np.clip(spikeyness, 1e-9, 1) - - # generate n angle steps - lower = (tau / n) - irregularity - upper = (tau / n) + irregularity - angle_steps = rng.uniform(lower, upper, n) - - # normalize the steps so that point 0 and point n+1 are the same - k = angle_steps.sum() / (2 * np.pi) - angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) - - # Convert high and low values to be wrt the standard normal range - # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html - low = 0 - high = 2 * radius - mean = radius - std = spikeyness - a = (low - mean) / std - b = (high - mean) / std - tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) - - # now generate the points - radii = tnorm.rvs(n, random_state=rng) - x_pts = cx + radii * np.cos(angles) - y_pts = cy + radii * np.sin(angles) - - points = np.hstack([x_pts[:, None], y_pts[:, None]]) - - # Scale to 0-1 space - points = points - points.min(axis=0) - points = points / points.max(axis=0) - - # Randomly place within 0-1 space - points = points * (rng.rand() * .8 + .2) - min_pt = points.min(axis=0) - max_pt = points.max(axis=0) - - high = (1 - max_pt) - low = (0 - min_pt) - offset = (rng.rand(2) * (high - low)) + low - points = points + offset - return points - - def _order_vertices(verts): - """ - References: - https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise - """ - mlat = verts.T[0].sum() / len(verts) - mlng = verts.T[1].sum() / len(verts) - - tau = np.pi * 2 - angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + - tau) % tau - sortx = angle.argsort() - verts = verts.take(sortx, axis=0) - return verts - - # Generate a random exterior for each requested mask - masks = [] - for _ in range(num_masks): - exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) - exterior = (exterior * [(width, height)]).astype(dtype) - masks.append([exterior.ravel()]) - - self = cls(masks, height, width) - return self - - -def polygon_to_bitmap(polygons, height, width): - """Convert masks from the form of polygons to bitmaps. 
- - Args: - polygons (list[ndarray]): masks in polygon representation - height (int): mask height - width (int): mask width - - Return: - ndarray: the converted masks in bitmap representation - """ - rles = maskUtils.frPyObjects(polygons, height, width) - rle = maskUtils.merge(rles) - bitmap_mask = maskUtils.decode(rle).astype(np.bool) - return bitmap_mask diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index 9e43af541f6e3df3f36479e736bb0c03fc916970..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ann_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py deleted file mode 100644 index 4bf3edd825296fbbed883effc3622793e9adf071..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/fpn_r50.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/point_head.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/point_head.py deleted file mode 100644 index 90a23635d935abf1179f181f04ab0f4265c6a1c7..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/point_head.py +++ /dev/null @@ -1,349 +0,0 @@ -# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, normal_init -from mmcv.ops import point_sample - -from mmseg.models.builder import HEADS -from mmseg.ops import resize -from ..losses import accuracy -from .cascade_decode_head import BaseCascadeDecodeHead - - -def calculate_uncertainty(seg_logits): - """Estimate uncertainty based on seg logits. - - For each location of the prediction ``seg_logits`` we estimate - uncertainty as the difference between top first and top second - predicted logits. - - Args: - seg_logits (Tensor): Semantic segmentation logits, - shape (batch_size, num_classes, height, width). - - Returns: - scores (Tensor): T uncertainty scores with the most uncertain - locations having the highest uncertainty score, shape ( - batch_size, 1, height, width) - """ - top2_scores = torch.topk(seg_logits, k=2, dim=1)[0] - return (top2_scores[:, 1] - top2_scores[:, 0]).unsqueeze(1) - - -@HEADS.register_module() -class PointHead(BaseCascadeDecodeHead): - """A mask point head use in PointRend. - - ``PointHead`` use shared multi-layer perceptron (equivalent to - nn.Conv1d) to predict the logit of input points. The fine-grained feature - and coarse feature will be concatenate together for predication. - - Args: - num_fcs (int): Number of fc layers in the head. Default: 3. - in_channels (int): Number of input channels. Default: 256. - fc_channels (int): Number of fc channels. 
Default: 256. - num_classes (int): Number of classes for logits. Default: 80. - class_agnostic (bool): Whether use class agnostic classification. - If so, the output channels of logits will be 1. Default: False. - coarse_pred_each_layer (bool): Whether concatenate coarse feature with - the output of each fc layer. Default: True. - conv_cfg (dict|None): Dictionary to construct and config conv layer. - Default: dict(type='Conv1d')) - norm_cfg (dict|None): Dictionary to construct and config norm layer. - Default: None. - loss_point (dict): Dictionary to construct and config loss layer of - point head. Default: dict(type='CrossEntropyLoss', use_mask=True, - loss_weight=1.0). - """ - - def __init__(self, - num_fcs=3, - coarse_pred_each_layer=True, - conv_cfg=dict(type='Conv1d'), - norm_cfg=None, - act_cfg=dict(type='ReLU', inplace=False), - **kwargs): - super(PointHead, self).__init__( - input_transform='multiple_select', - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - **kwargs) - - self.num_fcs = num_fcs - self.coarse_pred_each_layer = coarse_pred_each_layer - - fc_in_channels = sum(self.in_channels) + self.num_classes - fc_channels = self.channels - self.fcs = nn.ModuleList() - for k in range(num_fcs): - fc = ConvModule( - fc_in_channels, - fc_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.fcs.append(fc) - fc_in_channels = fc_channels - fc_in_channels += self.num_classes if self.coarse_pred_each_layer \ - else 0 - self.fc_seg = nn.Conv1d( - fc_in_channels, - self.num_classes, - kernel_size=1, - stride=1, - padding=0) - if self.dropout_ratio > 0: - self.dropout = nn.Dropout(self.dropout_ratio) - delattr(self, 'conv_seg') - - def init_weights(self): - """Initialize weights of classification layer.""" - normal_init(self.fc_seg, std=0.001) - - def cls_seg(self, feat): - """Classify each pixel with fc.""" - if self.dropout is not None: - feat = self.dropout(feat) - output = self.fc_seg(feat) - return output - - def forward(self, fine_grained_point_feats, coarse_point_feats): - x = torch.cat([fine_grained_point_feats, coarse_point_feats], dim=1) - for fc in self.fcs: - x = fc(x) - if self.coarse_pred_each_layer: - x = torch.cat((x, coarse_point_feats), dim=1) - return self.cls_seg(x) - - def _get_fine_grained_point_feats(self, x, points): - """Sample from fine grained features. - - Args: - x (list[Tensor]): Feature pyramid from by neck or backbone. - points (Tensor): Point coordinates, shape (batch_size, - num_points, 2). - - Returns: - fine_grained_feats (Tensor): Sampled fine grained feature, - shape (batch_size, sum(channels of x), num_points). - """ - - fine_grained_feats_list = [ - point_sample(_, points, align_corners=self.align_corners) - for _ in x - ] - if len(fine_grained_feats_list) > 1: - fine_grained_feats = torch.cat(fine_grained_feats_list, dim=1) - else: - fine_grained_feats = fine_grained_feats_list[0] - - return fine_grained_feats - - def _get_coarse_point_feats(self, prev_output, points): - """Sample from fine grained features. - - Args: - prev_output (list[Tensor]): Prediction of previous decode head. - points (Tensor): Point coordinates, shape (batch_size, - num_points, 2). - - Returns: - coarse_feats (Tensor): Sampled coarse feature, shape (batch_size, - num_classes, num_points). 
- """ - - coarse_feats = point_sample( - prev_output, points, align_corners=self.align_corners) - - return coarse_feats - - def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, - train_cfg): - """Forward function for training. - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. - train_cfg (dict): The training config. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - x = self._transform_inputs(inputs) - with torch.no_grad(): - points = self.get_points_train( - prev_output, calculate_uncertainty, cfg=train_cfg) - fine_grained_point_feats = self._get_fine_grained_point_feats( - x, points) - coarse_point_feats = self._get_coarse_point_feats(prev_output, points) - point_logits = self.forward(fine_grained_point_feats, - coarse_point_feats) - point_label = point_sample( - gt_semantic_seg.float(), - points, - mode='nearest', - align_corners=self.align_corners) - point_label = point_label.squeeze(1).long() - - losses = self.losses(point_logits, point_label) - - return losses - - def forward_test(self, inputs, prev_output, img_metas, test_cfg): - """Forward function for testing. - - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - test_cfg (dict): The testing config. - - Returns: - Tensor: Output segmentation map. - """ - - x = self._transform_inputs(inputs) - refined_seg_logits = prev_output.clone() - for _ in range(test_cfg.subdivision_steps): - refined_seg_logits = resize( - refined_seg_logits, - scale_factor=test_cfg.scale_factor, - mode='bilinear', - align_corners=self.align_corners) - batch_size, channels, height, width = refined_seg_logits.shape - point_indices, points = self.get_points_test( - refined_seg_logits, calculate_uncertainty, cfg=test_cfg) - fine_grained_point_feats = self._get_fine_grained_point_feats( - x, points) - coarse_point_feats = self._get_coarse_point_feats( - prev_output, points) - point_logits = self.forward(fine_grained_point_feats, - coarse_point_feats) - - point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1) - refined_seg_logits = refined_seg_logits.reshape( - batch_size, channels, height * width) - refined_seg_logits = refined_seg_logits.scatter_( - 2, point_indices, point_logits) - refined_seg_logits = refined_seg_logits.view( - batch_size, channels, height, width) - - return refined_seg_logits - - def losses(self, point_logits, point_label): - """Compute segmentation loss.""" - loss = dict() - loss['loss_point'] = self.loss_decode( - point_logits, point_label, ignore_index=self.ignore_index) - loss['acc_point'] = accuracy(point_logits, point_label) - return loss - - def get_points_train(self, seg_logits, uncertainty_func, cfg): - """Sample points for training. 
- - Sample points in [0, 1] x [0, 1] coordinate space based on their - uncertainty. The uncertainties are calculated for each point using - 'uncertainty_func' function that takes point's logit prediction as - input. - - Args: - seg_logits (Tensor): Semantic segmentation logits, shape ( - batch_size, num_classes, height, width). - uncertainty_func (func): uncertainty calculation function. - cfg (dict): Training config of point head. - - Returns: - point_coords (Tensor): A tensor of shape (batch_size, num_points, - 2) that contains the coordinates of ``num_points`` sampled - points. - """ - num_points = cfg.num_points - oversample_ratio = cfg.oversample_ratio - importance_sample_ratio = cfg.importance_sample_ratio - assert oversample_ratio >= 1 - assert 0 <= importance_sample_ratio <= 1 - batch_size = seg_logits.shape[0] - num_sampled = int(num_points * oversample_ratio) - point_coords = torch.rand( - batch_size, num_sampled, 2, device=seg_logits.device) - point_logits = point_sample(seg_logits, point_coords) - # It is crucial to calculate uncertainty based on the sampled - # prediction value for the points. Calculating uncertainties of the - # coarse predictions first and sampling them for points leads to - # incorrect results. To illustrate this: assume uncertainty func( - # logits)=-abs(logits), a sampled point between two coarse - # predictions with -1 and 1 logits has 0 logits, and therefore 0 - # uncertainty value. However, if we calculate uncertainties for the - # coarse predictions first, both will have -1 uncertainty, - # and sampled point will get -1 uncertainty. - point_uncertainties = uncertainty_func(point_logits) - num_uncertain_points = int(importance_sample_ratio * num_points) - num_random_points = num_points - num_uncertain_points - idx = torch.topk( - point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] - shift = num_sampled * torch.arange( - batch_size, dtype=torch.long, device=seg_logits.device) - idx += shift[:, None] - point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( - batch_size, num_uncertain_points, 2) - if num_random_points > 0: - rand_point_coords = torch.rand( - batch_size, num_random_points, 2, device=seg_logits.device) - point_coords = torch.cat((point_coords, rand_point_coords), dim=1) - return point_coords - - def get_points_test(self, seg_logits, uncertainty_func, cfg): - """Sample points for testing. - - Find ``num_points`` most uncertain points from ``uncertainty_map``. - - Args: - seg_logits (Tensor): A tensor of shape (batch_size, num_classes, - height, width) for class-specific or class-agnostic prediction. - uncertainty_func (func): uncertainty calculation function. - cfg (dict): Testing config of point head. - - Returns: - point_indices (Tensor): A tensor of shape (batch_size, num_points) - that contains indices from [0, height x width) of the most - uncertain points. - point_coords (Tensor): A tensor of shape (batch_size, num_points, - 2) that contains [0, 1] x [0, 1] normalized coordinates of the - most uncertain points from the ``height x width`` grid . 
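        Example (illustrative, added annotation): with ``height = width = 2``
            the grid cell size is ``h_step = w_step = 0.5``, so a flattened
            index of 3 (row 1, col 1) maps to the cell-centre coordinate
            ``(0.5 / 2 + 1 * 0.5, 0.5 / 2 + 1 * 0.5) = (0.75, 0.75)``,
            matching the ``point_coords`` computation below.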
- """ - - num_points = cfg.subdivision_num_points - uncertainty_map = uncertainty_func(seg_logits) - batch_size, _, height, width = uncertainty_map.shape - h_step = 1.0 / height - w_step = 1.0 / width - - uncertainty_map = uncertainty_map.view(batch_size, height * width) - num_points = min(height * width, num_points) - point_indices = uncertainty_map.topk(num_points, dim=1)[1] - point_coords = torch.zeros( - batch_size, - num_points, - 2, - dtype=torch.float, - device=seg_logits.device) - point_coords[:, :, 0] = w_step / 2.0 + (point_indices % - width).float() * w_step - point_coords[:, :, 1] = h_step / 2.0 + (point_indices // - width).float() * h_step - return point_indices, point_coords diff --git a/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_transformer.py b/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_transformer.py deleted file mode 100644 index ff7dfe4c2de05112aec55ddea9c8fd978668f80b..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus/tests/modules/test_transformer.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.transformer import ( - StreamingMultiheadAttention, StreamingTransformer, set_efficient_attention_backend) - - -def test_transformer_causal_streaming(): - torch.manual_seed(1234) - - for context, custom in product([None, 10], [False, True]): - # Test that causality and receptive fields are properly handled. - # looking at the gradients - tr = StreamingTransformer( - 16, 4, 1 if context else 2, - causal=True, past_context=context, custom=custom, - dropout=0.) - steps = 20 - for k in [0, 10, 15, 19]: - x = torch.randn(4, steps, 16, requires_grad=True) - y = tr(x) - y[:, k].abs().sum().backward() - if k + 1 < steps: - assert torch.allclose(x.grad[:, k + 1:], torch.tensor(0.)), x.grad[:, k + 1:].norm() - assert not torch.allclose(x.grad[:, :k + 1], torch.tensor(0.)), x.grad[:, :k + 1].norm() - if context is not None and k > context: - limit = k - context - 1 - assert torch.allclose(x.grad[:, :limit], - torch.tensor(0.)), x.grad[:, :limit].norm() - - # Now check that streaming gives the same result at batch eval. - x = torch.randn(4, steps, 16) - y = tr(x) - ys = [] - with tr.streaming(): - for k in range(steps): - chunk = x[:, k:k + 1, :] - ys.append(tr(chunk)) - y_stream = torch.cat(ys, dim=1) - delta = torch.norm(y_stream - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_transformer_vs_pytorch(): - torch.manual_seed(1234) - # Check that in the non causal setting, we get the same result as - # PyTorch Transformer encoder. - for custom in [False, True]: - tr = StreamingTransformer( - 16, 4, 2, - causal=False, custom=custom, dropout=0., positional_scale=0.) - layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True) - tr_ref = torch.nn.TransformerEncoder(layer, 2) - tr.load_state_dict(tr_ref.state_dict()) - - x = torch.randn(4, 20, 16) - y = tr(x) - y2 = tr_ref(x) - delta = torch.norm(y2 - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_streaming_api(): - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.) 
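    # Added walkthrough comment (not in the original test): the block below
    # runs one streaming step, snapshots the streaming state, advances one
    # more step, then restores the snapshot and repeats that same step.  The
    # two outputs must match exactly, which checks that get_streaming_state()
    # and set_streaming_state() round-trip the per-layer caches; flush() is
    # expected to return None for this transformer.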
- tr.eval() - steps = 12 - x = torch.randn(1, steps, 16) - - with torch.no_grad(): - with tr.streaming(): - _ = tr(x[:, :1]) - state = {k: v.clone() for k, v in tr.get_streaming_state().items()} - y = tr(x[:, 1:2]) - tr.set_streaming_state(state) - y2 = tr(x[:, 1:2]) - assert torch.allclose(y, y2), (y - y2).norm() - assert tr.flush() is None - - -def test_memory_efficient(): - for backend in ['torch', 'xformers']: - torch.manual_seed(1234) - set_efficient_attention_backend(backend) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1) - tr_mem_efficient.load_state_dict(tr.state_dict()) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_mem_efficient(x) - assert torch.allclose(y, y2), ((y - y2).norm(), backend) - - -def test_attention_as_float32(): - torch.manual_seed(1234) - cases = [ - {'custom': True}, - {'custom': False}, - ] - for case in cases: - tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case) - tr_float32 = StreamingTransformer( - 16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case) - if not case['custom']: - # we are not using autocast here because it doesn't really - # work as expected on CPU, so we have to manually cast the weights of the MHA. - for layer in tr_float32.layers: - layer.self_attn.mha.to(torch.float32) - tr_float32.load_state_dict(tr.state_dict()) - steps = 12 - x = torch.randn(3, steps, 16, dtype=torch.bfloat16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_float32(x) - assert not torch.allclose(y, y2), (y - y2).norm() - - -@torch.no_grad() -def test_streaming_memory_efficient(): - for backend in ['torch', 'xformers']: - torch.manual_seed(1234) - set_efficient_attention_backend(backend) - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, causal=True) - tr.load_state_dict(tr_mem_efficient.state_dict()) - tr.eval() - tr_mem_efficient.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - ref = tr(x) - - with tr_mem_efficient.streaming(): - outs = [] - # frame_sizes = [2] + [1] * (steps - 2) - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr_mem_efficient(frame)) - - out = torch.cat(outs, dim=1) - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -def test_cross_attention(): - torch.manual_seed(1234) - for norm_first in [True, False]: - m = StreamingTransformer( - 16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True) - m_cross = StreamingTransformer( - 16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True) - m_cross.load_state_dict(m.state_dict(), strict=False) - x = torch.randn(2, 5, 16) - cross_x = torch.randn(2, 3, 16) - y_ref = m(x) - y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x) - # With norm_first, the two should be exactly yhe same, - # but with norm_first=False, we get 2 normalization in a row - # and the epsilon value leads to a tiny change. - atol = 0. if norm_first else 1e-6 - print((y_ref - y_cross_zero).norm() / y_ref.norm()) - assert torch.allclose(y_ref, y_cross_zero, atol=atol) - - # We now expect a difference even with a generous atol of 1e-2. 
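    # Added note: with a real (non-zero) cross_attention_src the output should
    # diverge from the zero-source case, confirming that the cross-attention
    # path actually contributes to the result rather than being a no-op.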
- y_cross = m_cross(x, cross_attention_src=cross_x) - assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2) - - with pytest.raises(AssertionError): - _ = m_cross(x) - _ = m(x, cross_attention_src=cross_x) - - -def test_cross_attention_compat(): - torch.manual_seed(1234) - num_heads = 2 - dim = num_heads * 64 - with pytest.raises(AssertionError): - StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True) - - cross_attn = StreamingMultiheadAttention( - dim, num_heads, dropout=0, cross_attention=True, custom=True) - ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True) - - # We can load the regular attention state dict - # so we have compat when loading old checkpoints. - cross_attn.load_state_dict(ref_attn.state_dict()) - - queries = torch.randn(3, 7, dim) - keys = torch.randn(3, 9, dim) - values = torch.randn(3, 9, dim) - - y = cross_attn(queries, keys, values)[0] - y_ref = ref_attn(queries, keys, values)[0] - assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm() - - # Now let's check that streaming is working properly. - with cross_attn.streaming(): - ys = [] - for step in range(queries.shape[1]): - ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0]) - y_streaming = torch.cat(ys, dim=1) - assert torch.allclose(y_streaming, y, atol=1e-7) - - -def test_repeat_kv(): - torch.manual_seed(1234) - num_heads = 8 - kv_repeat = 4 - dim = num_heads * 64 - with pytest.raises(AssertionError): - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True) - x = torch.randn(4, 18, dim) - y = mha(x, x, x)[0] - assert x.shape == y.shape - - -def test_qk_layer_norm(): - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False) - steps = 12 - x = torch.randn(3, steps, 16) - y = tr(x) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True) - z = torch.randn(3, 21, 16) - y = tr(x, cross_attention_src=z) - assert y.shape == x.shape diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HoangHa/llama2-code/model.py b/spaces/HoangHa/llama2-code/model.py deleted file mode 100644 index df434946b31d6d58ff4a7ea3b9bd5f1375be5534..0000000000000000000000000000000000000000 --- a/spaces/HoangHa/llama2-code/model.py +++ /dev/null @@ -1,63 +0,0 @@ -from typing import Iterator -from llama_cpp import Llama -from huggingface_hub import hf_hub_download - - -def download_model(): - # See https://github.com/OpenAccess-AI-Collective/ggml-webui/blob/main/tabbed.py - # https://huggingface.co/spaces/kat33/llama.cpp/blob/main/app.py - print(f"Downloading model: {model_repo}/{model_filename}") - file = hf_hub_download( - repo_id=model_repo, filename=model_filename - ) - print("Downloaded " + file) - return file - -model_repo = "TheBloke/CodeLlama-7B-Instruct-GGUF" -model_filename = "codellama-7b-instruct.Q4_K_S.gguf" - -model_path = download_model() - -# load Llama-2 -llm = Llama(model_path=model_path, n_ctx=4000, verbose=False) - - -def 
get_prompt(message: str, chat_history: list[tuple[str, str]], - system_prompt: str) -> str: - texts = [f'[INST] <>\n{system_prompt}\n<>\n\n'] - for user_input, response in chat_history: - texts.append(f'{user_input.strip()} [/INST] {response.strip()} [INST] ') - texts.append(f'{message.strip()} [/INST]') - return ''.join(texts) - -def generate(prompt, max_new_tokens, temperature, top_p, top_k): - return llm(prompt, - max_tokens=max_new_tokens, - stop=[""], - temperature=temperature, - top_p=top_p, - top_k=top_k, - stream=False) - - -def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int: - prompt = get_prompt(message, chat_history, system_prompt) - input_ids = llm.tokenize(prompt.encode('utf-8')) - return len(input_ids) - - -def run(message: str, - chat_history: list[tuple[str, str]], - system_prompt: str, - max_new_tokens: int = 1024, - temperature: float = 0.8, - top_p: float = 0.95, - top_k: int = 50) -> Iterator[str]: - prompt = get_prompt(message, chat_history, system_prompt) - output = generate(prompt, max_new_tokens, temperature, top_p, top_k) - yield output['choices'][0]['text'] - - # outputs = [] - # for resp in streamer: - # outputs.append(resp['choices'][0]['text']) - # yield ''.join(outputs) diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/evaluation/get_eval_manifest.py b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/evaluation/get_eval_manifest.py deleted file mode 100644 index a28cd607a096844438f6a3ba6b007d94d67d1bc8..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/evaluation/get_eval_manifest.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
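# Added summary comment (inferred from main() below, not in the original file):
#   reads  --audio-manifest : TSV whose rows provide id, audio, tgt_text, speaker
#   writes --output-path    : TSV with columns id, syn, ref, text, speaker
#   where `syn` points at <generation-root>/<fmt>_<rate>hz_<vocoder>/<id>.<fmt>
#   and `ref` is either the original audio or, with --use-resynthesized-target,
#   the resynthesized target under the matching "_tgt" directory.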
- - -import csv -from pathlib import Path - - -def main(args): - """ - `uid syn ref text` - """ - in_root = Path(args.generation_root).resolve() - ext = args.audio_format - with open(args.audio_manifest) as f, open(args.output_path, "w") as f_out: - reader = csv.DictReader( - f, delimiter="\t", quotechar=None, doublequote=False, - lineterminator="\n", quoting=csv.QUOTE_NONE - ) - header = ["id", "syn", "ref", "text", "speaker"] - f_out.write("\t".join(header) + "\n") - for row in reader: - dir_name = f"{ext}_{args.sample_rate}hz_{args.vocoder}" - id_ = row["id"] - syn = (in_root / dir_name / f"{id_}.{ext}").as_posix() - ref = row["audio"] - if args.use_resynthesized_target: - ref = (in_root / f"{dir_name}_tgt" / f"{id_}.{ext}").as_posix() - sample = [id_, syn, ref, row["tgt_text"], row["speaker"]] - f_out.write("\t".join(sample) + "\n") - print(f"wrote evaluation file to {args.output_path}") - - -if __name__ == "__main__": - import argparse - parser = argparse.ArgumentParser() - parser.add_argument( - "--generation-root", help="output directory for generate_waveform.py" - ) - parser.add_argument( - "--audio-manifest", - help="used to determine the original utterance ID and text" - ) - parser.add_argument( - "--output-path", help="path to output evaluation spec file" - ) - parser.add_argument( - "--use-resynthesized-target", action="store_true", - help="use resynthesized reference instead of the original audio" - ) - parser.add_argument("--vocoder", type=str, default="griffin_lim") - parser.add_argument("--sample-rate", type=int, default=22_050) - parser.add_argument("--audio-format", type=str, default="wav") - args = parser.parse_args() - - main(args) diff --git a/spaces/ICML2022/resefa/utils/tf_utils.py b/spaces/ICML2022/resefa/utils/tf_utils.py deleted file mode 100644 index 80e48ed06e614571d920125d4b64fbfefbf804c0..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/resefa/utils/tf_utils.py +++ /dev/null @@ -1,47 +0,0 @@ -# python3.7 -"""Contains the utility functions to handle import TensorFlow modules. - -Basically, TensorFlow may not be supported in the current environment, or may -cause some warnings. This file provides functions to help ease TensorFlow -related imports, such as TensorBoard. -""" - -import warnings - -__all__ = ['import_tf', 'import_tb_writer'] - - -def import_tf(): - """Imports TensorFlow module if possible. - - If `ImportError` is raised, `None` will be returned. Otherwise, the module - `tensorflow` will be returned. - """ - warnings.filterwarnings('ignore', category=FutureWarning) - try: - import tensorflow as tf # pylint: disable=import-outside-toplevel - tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) - module = tf - except ImportError: - module = None - warnings.filterwarnings('default', category=FutureWarning) - return module - - -def import_tb_writer(): - """Imports the SummaryWriter of TensorBoard. - - If `ImportError` is raised, `None` will be returned. Otherwise, the class - `SummaryWriter` will be returned. - - NOTE: This function attempts to import `SummaryWriter` from - `torch.utils.tensorboard`. But it does not necessarily mean the import - always succeeds because installing TensorBoard is not a duty of `PyTorch`. - """ - warnings.filterwarnings('ignore', category=FutureWarning) - try: - from torch.utils.tensorboard import SummaryWriter # pylint: disable=import-outside-toplevel - except ImportError: # In case TensorBoard is not supported. 
- SummaryWriter = None - warnings.filterwarnings('default', category=FutureWarning) - return SummaryWriter diff --git a/spaces/JMalott/ai_architecture/utils.py b/spaces/JMalott/ai_architecture/utils.py deleted file mode 100644 index 63e6f58734846c5c456d931c6fbc7525fde2009c..0000000000000000000000000000000000000000 --- a/spaces/JMalott/ai_architecture/utils.py +++ /dev/null @@ -1,215 +0,0 @@ -from htbuilder import HtmlElement, div, ul, li, br, hr, a, p, img, styles, classes, fonts -from htbuilder.units import percent, px -from htbuilder.funcs import rgba, rgb -import streamlit as st -import os -import sys -import argparse -import clip -import numpy as np -from PIL import Image -from dalle.models import Dalle -from dalle.utils.utils import set_seed, clip_score -import streamlit.components.v1 as components -import torch -#from IPython.display import display -import random - -def link(link, text, **style): - return a(_href=link, _target="_blank", style=styles(**style))(text) - -def layout(*args): - - style = """ - - """ - - style_div = styles( - position="fixed", - left=0, - bottom=0, - margin=px(0, 0, 0, 0), - width=percent(100), - color="black", - text_align="center", - height="auto", - opacity=1 - ) - - style_hr = styles( - display="block", - margin=px(8, 8, "auto", "auto"), - border_style="inset", - border_width=px(2) - ) - - body = p() - foot = div( - style=style_div - )( - hr( - style=style_hr - ), - body - ) - - st.markdown(style, unsafe_allow_html=True) - - for arg in args: - if isinstance(arg, str): - body(arg) - - elif isinstance(arg, HtmlElement): - body(arg) - - st.markdown(str(foot), unsafe_allow_html=True) - - -def footer(): - - #myargs = [] - #layout(*myargs) - style = """ - - """ - st.markdown(style, unsafe_allow_html=True) - - st.markdown("") - st.markdown("") - st.markdown("") - st.markdown("This app uses the [min(DALL·E)](https://github.com/kuprel/min-dalle) port of [DALL·E mini](https://github.com/borisdayma/dalle-mini)") - st.markdown("Created by [Jonathan Malott](https://jonathanmalott.com)") - st.markdown("[Good Systems Grand Challenge](https://bridgingbarriers.utexas.edu/good-systems), The University of Texas at Austin. Advised by Dr. Junfeng Jiao.") - - - - -from min_dalle import MinDalle - -def generate2(prompt,crazy,k): - - - mm = MinDalle( - models_root='./pretrained', - dtype=torch.float32, - device='cpu', - is_mega=False, - is_reusable=True - ) - - # Sampling - newPrompt = prompt - if("architecture" not in prompt.lower() ): - newPrompt += " architecture" - - image = mm.generate_image( - text=newPrompt, - seed=np.random.randint(0,10000), - grid_size=1, - is_seamless=False, - temperature=crazy, - top_k=k,#2128, - supercondition_factor=32, - is_verbose=False - ) - - item = {} - item['prompt'] = prompt - item['crazy'] = crazy - item['k'] = k - item['image'] = image - st.session_state.results.append(item) - -model = False -def generate(prompt,crazy,k): - global model - - device = 'cpu' - if(model == False): - model = Dalle.from_pretrained('minDALL-E/1.3B') # This will automatically download the pretrained model. 
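    # Added comment: `model` is a module-level global used as a lazy singleton.
    # The 1.3B minDALL-E checkpoint is downloaded and loaded only on the first
    # call, so subsequent generate() calls (e.g. on Streamlit reruns) reuse the
    # already-initialised model on `device` instead of reloading it.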
- model.to(device=device) - - num_candidates = 1 - - images = [] - - set_seed(np.random.randint(0,10000)) - - # Sampling - newPrompt = prompt - if("architecture" not in prompt.lower() ): - newPrompt += " architecture" - - images = model.sampling(prompt=newPrompt, - top_k=k, - top_p=None, - softmax_temperature=crazy, - num_candidates=num_candidates, - device=device).cpu().numpy() - images = np.transpose(images, (0, 2, 3, 1)) - - # CLIP Re-ranking - model_clip, preprocess_clip = clip.load("ViT-B/32", device=device) - model_clip.to(device=device) - rank = clip_score(prompt=newPrompt, - images=images, - model_clip=model_clip, - preprocess_clip=preprocess_clip, - device=device) - - result = images[rank] - - item = {} - item['prompt'] = prompt - item['crazy'] = crazy - item['k'] = k - item['image'] = Image.fromarray((result*255).astype(np.uint8)) - st.session_state.results.append(item) - - -def drawGrid(): - master = {} - - for r in st.session_state.results[::-1]: - _txt = r['prompt']+" "+str(r['crazy'])+" "+str(r['k']) - if(_txt not in master): - master[_txt] = [r] - else: - master[_txt].append(r) - - - for i in st.session_state.images: - im = st.empty() - - - placeholder = st.empty() - with placeholder.container(): - - for m in master: - - txt = master[m][0]['prompt']+" (Temperature:"+ str(master[m][0]['crazy']) + ", Top K:" + str(master[m][0]['k']) + ")" - st.subheader(txt) - col1, col2, col3 = st.columns(3) - - for ix, item in enumerate(master[m]): - if ix % 3 == 0: - with col1: - st.session_state.images.append(st.image(item["image"])) - if ix % 3 == 1: - with col2: - st.session_state.images.append(st.image(item["image"])) - if ix % 3 == 2: - with col3: - st.session_state.images.append(st.image(item["image"])) - - diff --git a/spaces/Jarvis2301/Aku/README.md b/spaces/Jarvis2301/Aku/README.md deleted file mode 100644 index b4a19e1904c4c7173dccebb87f3543c9d410d359..0000000000000000000000000000000000000000 --- a/spaces/Jarvis2301/Aku/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -license: apache-2.0 -title: ' vits-uma-genshin-honkai' -sdk: gradio -sdk_version: 3.7 -emoji: 🐨 -colorTo: yellow -pinned: false -app_file: app.py -duplicated_from: WorldlineChanger/sayashi-vits-uma-genshin-honkai ---- diff --git a/spaces/Jikiwi/sovits-models/inference_main.py b/spaces/Jikiwi/sovits-models/inference_main.py deleted file mode 100644 index 3b2c32ac9e29e6b016e656e937fede5d2c23e7e6..0000000000000000000000000000000000000000 --- a/spaces/Jikiwi/sovits-models/inference_main.py +++ /dev/null @@ -1,130 +0,0 @@ -import io -import logging -import time -from pathlib import Path - -import librosa -import matplotlib.pyplot as plt -import numpy as np -import soundfile - -from inference import infer_tool -from inference import slicer -from inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - - - -def main(): - import argparse - - parser = argparse.ArgumentParser(description='sovits4 inference') - - # 一定要设置的部分 - parser.add_argument('-m', '--model_path', type=str, default="logs/44k/G_0.pth", help='模型路径') - parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='配置文件路径') - parser.add_argument('-cl', '--clip', type=float, default=0, help='音频强制切片,默认0为自动切片,单位为秒/s') - parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src.wav"], help='wav文件名列表,放在raw文件夹下') - parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='音高调整,支持正负(半音)') - 
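    # English gloss of the Chinese help strings above (added for readability):
    #   -m/--model_path  : path to the model checkpoint
    #   -c/--config_path : path to the config file
    #   -cl/--clip       : force-slice the audio every N seconds; 0 = automatic slicing
    #   -n/--clean_names : list of wav file names placed under the raw/ folder
    #   -t/--trans       : pitch shift in semitones (positive or negative)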
parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nen'], help='合成目标说话人名称') - - # 可选项部分 - parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False,help='语音转换自动预测音高,转换歌声时不要打开这个会严重跑调') - parser.add_argument('-cm', '--cluster_model_path', type=str, default="logs/44k/kmeans_10000.pt", help='聚类模型路径,如果没有训练聚类则随便填') - parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=0, help='聚类方案占比,范围0-1,若没有训练聚类模型则默认0即可') - parser.add_argument('-lg', '--linear_gradient', type=float, default=0, help='两段音频切片的交叉淡入长度,如果强制切片后出现人声不连贯可调整该数值,如果连贯建议采用默认值0,单位为秒') - parser.add_argument('-fmp', '--f0_mean_pooling', type=bool, default=False, help='是否对F0使用均值滤波器(池化),对部分哑音有改善。注意,启动该选项会导致推理速度下降,默认关闭') - - # 不用动的部分 - parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50') - parser.add_argument('-d', '--device', type=str, default=None, help='推理设备,None则为自动选择cpu和gpu') - parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='噪音级别,会影响咬字和音质,较为玄学') - parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现') - parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='音频输出格式') - parser.add_argument('-lgr', '--linear_gradient_retain', type=float, default=0.75, help='自动音频切片后,需要舍弃每段切片的头尾。该参数设置交叉长度保留的比例,范围0-1,左开右闭') - - args = parser.parse_args() - - svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path) - infer_tool.mkdir(["raw", "results"]) - clean_names = args.clean_names - trans = args.trans - spk_list = args.spk_list - slice_db = args.slice_db - wav_format = args.wav_format - auto_predict_f0 = args.auto_predict_f0 - cluster_infer_ratio = args.cluster_infer_ratio - noice_scale = args.noice_scale - pad_seconds = args.pad_seconds - clip = args.clip - lg = args.linear_gradient - lgr = args.linear_gradient_retain - F0_mean_pooling = args.f0_mean_pooling - - infer_tool.fill_a_to_b(trans, clean_names) - for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." 
not in raw_audio_path: - raw_audio_path += ".wav" - infer_tool.format_wav(raw_audio_path) - wav_path = Path(raw_audio_path).with_suffix('.wav') - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - per_size = int(clip*audio_sr) - lg_size = int(lg*audio_sr) - lg_size_r = int(lg_size*lgr) - lg_size_c_l = (lg_size-lg_size_r)//2 - lg_size_c_r = lg_size-lg_size_r-lg_size_c_l - lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0 - - for spk in spk_list: - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - - length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample)) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - audio.extend(list(infer_tool.pad_array(_audio, length))) - continue - if per_size != 0: - datas = infer_tool.split_list_by_n(data, per_size,lg_size) - else: - datas = [data] - for k,dat in enumerate(datas): - per_length = int(np.ceil(len(dat) / audio_sr * svc_model.target_sample)) if clip!=0 else length - if clip!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======') - # padd - pad_len = int(audio_sr * pad_seconds) - dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])]) - raw_path = io.BytesIO() - soundfile.write(raw_path, dat, audio_sr, format="wav") - raw_path.seek(0) - out_audio, out_sr = svc_model.infer(spk, tran, raw_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale, - F0_mean_pooling = F0_mean_pooling - ) - _audio = out_audio.cpu().numpy() - pad_len = int(svc_model.target_sample * pad_seconds) - _audio = _audio[pad_len:-pad_len] - _audio = infer_tool.pad_array(_audio, per_length) - if lg_size!=0 and k!=0: - lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr != 1 else audio[-lg_size:] - lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r] if lgr != 1 else _audio[0:lg_size] - lg_pre = lg1*(1-lg)+lg2*lg - audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr != 1 else audio[0:-lg_size] - audio.extend(lg_pre) - _audio = _audio[lg_size_c_l+lg_size_r:] if lgr != 1 else _audio[lg_size:] - audio.extend(list(_audio)) - key = "auto" if auto_predict_f0 else f"{tran}key" - cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}" - res_path = f'./results/{clean_name}_{key}_{spk}{cluster_name}.{wav_format}' - soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format) - -if __name__ == '__main__': - main() diff --git a/spaces/Junlinh/memorability_prediction/app.py b/spaces/Junlinh/memorability_prediction/app.py deleted file mode 100644 index b68101b3389be1b011528233a180169c8930729e..0000000000000000000000000000000000000000 --- a/spaces/Junlinh/memorability_prediction/app.py +++ /dev/null @@ -1,38 +0,0 @@ -import gradio as gr -import torchvision.transforms as transforms -from PIL import Image -import torch -from timm.models import create_model -import numpy as np -def predict(input_img): - input_img = Image.fromarray(np.uint8(input_img)) - model1 = create_model( - 'resnet50', - drop_rate=0.5, - num_classes=1,) - model2 = create_model( - 'resnet50', - drop_rate=0.5, - num_classes=1,) - - checkpoint1 = torch.load("./machine_full_best.tar",map_location=torch.device('cpu')) - model1.load_state_dict(checkpoint1['state_dict']) - checkpoint2 = torch.load("./human_full_best.tar",map_location=torch.device('cpu')) - model2.load_state_dict(checkpoint2['state_dict']) - - my_transform = 
transforms.Compose([ - transforms.RandomResizedCrop(224, (1, 1)), - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225]),]) - - input_img = my_transform(input_img).view(1,3,224,224) - model1.eval() - model2.eval() - result1 = round(model1(input_img).item(), 3) - result2 = round(model2(input_img).item(), 3) - result = 'MachineMem score = ' + str(result1) + ', HumanMem score = ' + str(result2) +'.' - return result - -demo = gr.Interface(predict, gr.Image(), "text", examples=["1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg", "6.jpg", "7.jpg", "8.jpg", "9.jpg", "10.jpg", "12.jpg", "13.jpg", "14.jpg", "15.jpg", "16.jpg", "18.jpg", "19.jpg", "20.jpg","21.jpg", "22.jpg", "24.jpg", "25.jpg", "26.jpg", "27.jpg", "28.jpg", "30.jpg","32.jpg", "35.jpg", "36.jpg", "37.jpg"]) -demo.launch(debug = True) \ No newline at end of file diff --git a/spaces/Kangarroar/ApplioRVC-Inference/tools/infer_cli.py b/spaces/Kangarroar/ApplioRVC-Inference/tools/infer_cli.py deleted file mode 100644 index bbe0a53c1aac6a8f2d42613d554b2bdd07abea2d..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/tools/infer_cli.py +++ /dev/null @@ -1,67 +0,0 @@ -import argparse -import os -import sys - -now_dir = os.getcwd() -sys.path.append(now_dir) -from dotenv import load_dotenv -from scipy.io import wavfile - -from configs.config import Config -from infer.modules.vc.modules import VC - -#### -# USAGE -# -# In your Terminal or CMD or whatever - - -def arg_parse() -> tuple: - parser = argparse.ArgumentParser() - parser.add_argument("--f0up_key", type=int, default=0) - parser.add_argument("--input_path", type=str, help="input path") - parser.add_argument("--index_path", type=str, help="index path") - parser.add_argument("--f0method", type=str, default="harvest", help="harvest or pm") - parser.add_argument("--opt_path", type=str, help="opt path") - parser.add_argument("--model_name", type=str, help="store in assets/weight_root") - parser.add_argument("--index_rate", type=float, default=0.66, help="index rate") - parser.add_argument("--device", type=str, help="device") - parser.add_argument("--is_half", type=bool, help="use half -> True") - parser.add_argument("--filter_radius", type=int, default=3, help="filter radius") - parser.add_argument("--resample_sr", type=int, default=0, help="resample sr") - parser.add_argument("--rms_mix_rate", type=float, default=1, help="rms mix rate") - parser.add_argument("--protect", type=float, default=0.33, help="protect") - - args = parser.parse_args() - sys.argv = sys.argv[:1] - - return args - - -def main(): - load_dotenv() - args = arg_parse() - config = Config() - config.device = args.device if args.device else config.device - config.is_half = args.is_half if args.is_half else config.is_half - vc = VC(config) - vc.get_vc(args.model_name) - _, wav_opt = vc.vc_single( - 0, - args.input_path, - args.f0up_key, - None, - args.f0method, - args.index_path, - None, - args.index_rate, - args.filter_radius, - args.resample_sr, - args.rms_mix_rate, - args.protect, - ) - wavfile.write(args.opt_path, wav_opt[0], wav_opt[1]) - - -if __name__ == "__main__": - main() diff --git a/spaces/Kevin676/Clone-Your-Voice/encoder/__init__.py b/spaces/Kevin676/Clone-Your-Voice/encoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Kevin676/Clone-Your-Voice/vocoder/inference.py b/spaces/Kevin676/Clone-Your-Voice/vocoder/inference.py deleted file mode 
100644 index 7e546845da0b8cdb18b34fbd332b9aaa39cea55c..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Clone-Your-Voice/vocoder/inference.py +++ /dev/null @@ -1,64 +0,0 @@ -from vocoder.models.fatchord_version import WaveRNN -from vocoder import hparams as hp -import torch - - -_model = None # type: WaveRNN - -def load_model(weights_fpath, verbose=True): - global _model, _device - - if verbose: - print("Building Wave-RNN") - _model = WaveRNN( - rnn_dims=hp.voc_rnn_dims, - fc_dims=hp.voc_fc_dims, - bits=hp.bits, - pad=hp.voc_pad, - upsample_factors=hp.voc_upsample_factors, - feat_dims=hp.num_mels, - compute_dims=hp.voc_compute_dims, - res_out_dims=hp.voc_res_out_dims, - res_blocks=hp.voc_res_blocks, - hop_length=hp.hop_length, - sample_rate=hp.sample_rate, - mode=hp.voc_mode - ) - - if torch.cuda.is_available(): - _model = _model.cuda() - _device = torch.device('cuda') - else: - _device = torch.device('cpu') - - if verbose: - print("Loading model weights at %s" % weights_fpath) - checkpoint = torch.load(weights_fpath, _device) - _model.load_state_dict(checkpoint['model_state']) - _model.eval() - - -def is_loaded(): - return _model is not None - - -def infer_waveform(mel, normalize=True, batched=True, target=8000, overlap=800, - progress_callback=None): - """ - Infers the waveform of a mel spectrogram output by the synthesizer (the format must match - that of the synthesizer!) - - :param normalize: - :param batched: - :param target: - :param overlap: - :return: - """ - if _model is None: - raise Exception("Please load Wave-RNN in memory before using it") - - if normalize: - mel = mel / hp.mel_max_abs_value - mel = torch.from_numpy(mel[None, ...]) - wav = _model.generate(mel, batched, target, overlap, hp.mu_law, progress_callback) - return wav diff --git a/spaces/KyanChen/FunSR/models/blocks/CSPLayer.py b/spaces/KyanChen/FunSR/models/blocks/CSPLayer.py deleted file mode 100644 index 807ed7ae503840e5b8b602549a8ab15d35b1bbe2..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/models/blocks/CSPLayer.py +++ /dev/null @@ -1,148 +0,0 @@ -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule - - -class DarknetBottleneck(nn.Module): - """The basic bottleneck block used in Darknet. - - Each ResBlock consists of two ConvModules and the input is added to the - final output. Each ConvModule is composed of Conv, BN, and LeakyReLU. - The first convLayer has filter size of 1x1 and the second one has the - filter size of 3x3. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - expansion (int): The kernel size of the convolution. Default: 0.5 - add_identity (bool): Whether to add identity to the out. - Default: True - use_depthwise (bool): Whether to use depthwise separable convolution. - Default: False - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='Swish'). 
- """ - - def __init__(self, - in_channels, - out_channels, - expansion=0.5, - add_identity=True, - use_depthwise=False, - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - init_cfg=None): - super().__init__() - hidden_channels = int(out_channels * expansion) - conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule - self.conv1 = ConvModule( - in_channels, - hidden_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.conv2 = conv( - hidden_channels, - out_channels, - 3, - stride=1, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.add_identity = \ - add_identity and in_channels == out_channels - - def forward(self, x): - identity = x - out = self.conv1(x) - out = self.conv2(out) - - if self.add_identity: - return out + identity - else: - return out - - -class CSPLayer(nn.Module): - """Cross Stage Partial Layer. - - Args: - in_channels (int): The input channels of the CSP layer. - out_channels (int): The output channels of the CSP layer. - expand_ratio (float): Ratio to adjust the number of channels of the - hidden layer. Default: 0.5 - num_blocks (int): Number of blocks. Default: 1 - add_identity (bool): Whether to add identity in blocks. - Default: True - use_depthwise (bool): Whether to depthwise separable convolution in - blocks. Default: False - conv_cfg (dict, optional): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN') - act_cfg (dict): Config dict for activation layer. - Default: dict(type='Swish') - """ - - def __init__(self, - in_channels, - out_channels, - expand_ratio=0.5, - num_blocks=1, - add_identity=True, - use_depthwise=False, - conv_cfg=None, - norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), - act_cfg=dict(type='Swish'), - init_cfg=None): - super().__init__() - mid_channels = int(out_channels * expand_ratio) - self.main_conv = ConvModule( - in_channels, - mid_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.short_conv = ConvModule( - in_channels, - mid_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.final_conv = ConvModule( - 2 * mid_channels, - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - self.blocks = nn.Sequential(*[ - DarknetBottleneck( - mid_channels, - mid_channels, - 1.0, - add_identity, - use_depthwise, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) for _ in range(num_blocks) - ]) - - def forward(self, x): - x_short = self.short_conv(x) - - x_main = self.main_conv(x) - x_main = self.blocks(x_main) - - x_final = torch.cat((x_main, x_short), dim=1) - return self.final_conv(x_final) \ No newline at end of file diff --git a/spaces/KyanChen/FunSR/models/cnn_models/common.py b/spaces/KyanChen/FunSR/models/cnn_models/common.py deleted file mode 100644 index af82b921c526b369a838acbd97080f81b2a5ea42..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/models/cnn_models/common.py +++ /dev/null @@ -1,140 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from torch.autograd import Variable - -def default_conv(in_channels, out_channels, kernel_size, bias=True): - return nn.Conv2d( - in_channels, out_channels, kernel_size, - padding=(kernel_size//2), bias=bias) - -class MeanShift(nn.Conv2d): - def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1): - 
super(MeanShift, self).__init__(3, 3, kernel_size=1) - std = torch.Tensor(rgb_std) - self.weight.data = torch.eye(3).view(3, 3, 1, 1) - self.weight.data.div_(std.view(3, 1, 1, 1)) - self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) - self.bias.data.div_(std) - self.requires_grad = False - -class BasicBlock(nn.Sequential): - def __init__( - self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True, - bn=False, act=nn.ReLU(True)): - - m = [conv(in_channels, out_channels, kernel_size, bias=bias)] - if bn: - m.append(nn.BatchNorm2d(out_channels)) - if act is not None: - m.append(act) - super(BasicBlock, self).__init__(*m) - -class ResBlock(nn.Module): - def __init__( - self, conv, n_feat, kernel_size, - bias=True, bn=False, act=nn.ReLU(True), res_scale=1): - - super(ResBlock, self).__init__() - m = [] - for i in range(2): - m.append(conv(n_feat, n_feat, kernel_size, bias=bias)) - if bn: m.append(nn.BatchNorm2d(n_feat)) - if i == 0: m.append(act) - - self.body = nn.Sequential(*m) - self.res_scale = res_scale - - def forward(self, x): - res = self.body(x).mul(self.res_scale) - res += x - - return res - - -class Upsampler(nn.Sequential): - def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True): - - m = [] - if (scale & (scale - 1)) == 0: # Is scale = 2^n? - for _ in range(int(math.log(scale, 2))): - m.append(conv(n_feat, 4 * n_feat, 3, bias)) - m.append(nn.PixelShuffle(2)) - if bn: m.append(nn.BatchNorm2d(n_feat)) - if act: m.append(act()) - elif scale == 3: - m.append(conv(n_feat, 9 * n_feat, 3, bias)) - m.append(nn.PixelShuffle(3)) - if bn: m.append(nn.BatchNorm2d(n_feat)) - if act: m.append(act()) - else: - raise NotImplementedError - - super(Upsampler, self).__init__(*m) - - -class DownBlock(nn.Module): - def __init__(self, scale): - super().__init__() - - self.scale = scale - - def forward(self, x): - n, c, h, w = x.size() - x = x.view(n, c, h//self.scale, self.scale, w//self.scale, self.scale) - x = x.permute(0, 3, 5, 1, 2, 4).contiguous() - x = x.view(n, c * (self.scale**2), h//self.scale, w//self.scale) - return x - -# NONLocalBlock2D -# ref: https://github.com/AlexHex7/Non-local_pytorch/blob/master/Non-Local_pytorch_0.4.1_to_1.1.0/lib/non_local_dot_product.py -# ref: https://github.com/yulunzhang/RNAN/blob/master/SR/code/model/common.py -class NonLocalBlock2D(nn.Module): - def __init__(self, in_channels, inter_channels): - super(NonLocalBlock2D, self).__init__() - - self.in_channels = in_channels - self.inter_channels = inter_channels - - self.g = nn.Conv2d(in_channels=in_channels, out_channels=inter_channels, - kernel_size=1, stride=1, padding=0) - self.W = nn.Conv2d(in_channels=inter_channels, out_channels=in_channels, - kernel_size=1, stride=1, padding=0) - nn.init.constant_(self.W.weight, 0) - nn.init.constant_(self.W.bias, 0) - - self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, - kernel_size=1, stride=1, padding=0) - self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, - kernel_size=1, stride=1, padding=0) - - def forward(self, x): - - batch_size = x.size(0) - - g_x = self.g(x).view(batch_size, self.inter_channels, -1) - g_x = g_x.permute(0, 2, 1) - - theta_x = self.theta(x).view(batch_size, self.inter_channels, -1) - theta_x = theta_x.permute(0, 2, 1) - - phi_x = self.phi(x).view(batch_size, self.inter_channels, -1) - f = torch.matmul(theta_x, phi_x) - - # use dot production - # N = f.size(-1) - # f_div_C = f / N - - # use embedding gaussian - f_div_C = F.softmax(f, dim=-1) 
- - y = torch.matmul(f_div_C, g_x) - y = y.permute(0, 2, 1).contiguous() - y = y.view(batch_size, self.inter_channels, *x.size()[2:]) - W_y = self.W(y) - z = W_y + x - - return z \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/lvis_metric.py b/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/lvis_metric.py deleted file mode 100644 index e4dd6141c0e3f94758a040fd2e2a72ea43ea9b63..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/evaluation/metrics/lvis_metric.py +++ /dev/null @@ -1,364 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import itertools -import os.path as osp -import tempfile -import warnings -from collections import OrderedDict -from typing import Dict, List, Optional, Sequence, Union - -import numpy as np -from mmengine.fileio import get_local_path -from mmengine.logging import MMLogger -from terminaltables import AsciiTable - -from mmdet.registry import METRICS -from mmdet.structures.mask import encode_mask_results -from ..functional import eval_recalls -from .coco_metric import CocoMetric - -try: - import lvis - if getattr(lvis, '__version__', '0') >= '10.5.3': - warnings.warn( - 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 - UserWarning) - from lvis import LVIS, LVISEval, LVISResults -except ImportError: - lvis = None - LVISEval = None - LVISResults = None - - -@METRICS.register_module() -class LVISMetric(CocoMetric): - """LVIS evaluation metric. - - Args: - ann_file (str, optional): Path to the coco format annotation file. - If not specified, ground truth annotations from the dataset will - be converted to coco format. Defaults to None. - metric (str | List[str]): Metrics to be evaluated. Valid metrics - include 'bbox', 'segm', 'proposal', and 'proposal_fast'. - Defaults to 'bbox'. - classwise (bool): Whether to evaluate the metric class-wise. - Defaults to False. - proposal_nums (Sequence[int]): Numbers of proposals to be evaluated. - Defaults to (100, 300, 1000). - iou_thrs (float | List[float], optional): IoU threshold to compute AP - and AR. If not specified, IoUs from 0.5 to 0.95 will be used. - Defaults to None. - metric_items (List[str], optional): Metric result names to be - recorded in the evaluation result. Defaults to None. - format_only (bool): Format the output results without perform - evaluation. It is useful when you want to format the result - to a specific format and submit it to the test server. - Defaults to False. - outfile_prefix (str, optional): The prefix of json files. It includes - the file path and the prefix of filename, e.g., "a/b/prefix". - If not specified, a temp file will be created. Defaults to None. - collect_device (str): Device name used for collecting results from - different ranks during distributed training. Must be 'cpu' or - 'gpu'. Defaults to 'cpu'. - prefix (str, optional): The prefix that will be added in the metric - names to disambiguate homonymous metrics of different evaluators. - If prefix is not provided in the argument, self.default_prefix - will be used instead. Defaults to None. - file_client_args (dict, optional): Arguments to instantiate the - corresponding backend in mmdet <= 3.0.0rc6. Defaults to None. - backend_args (dict, optional): Arguments to instantiate the - corresponding backend. Defaults to None. 
- """ - - default_prefix: Optional[str] = 'lvis' - - def __init__(self, - ann_file: Optional[str] = None, - metric: Union[str, List[str]] = 'bbox', - classwise: bool = False, - proposal_nums: Sequence[int] = (100, 300, 1000), - iou_thrs: Optional[Union[float, Sequence[float]]] = None, - metric_items: Optional[Sequence[str]] = None, - format_only: bool = False, - outfile_prefix: Optional[str] = None, - collect_device: str = 'cpu', - prefix: Optional[str] = None, - file_client_args: dict = None, - backend_args: dict = None) -> None: - if lvis is None: - raise RuntimeError( - 'Package lvis is not installed. Please run "pip install ' - 'git+https://github.com/lvis-dataset/lvis-api.git".') - super().__init__(collect_device=collect_device, prefix=prefix) - # coco evaluation metrics - self.metrics = metric if isinstance(metric, list) else [metric] - allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] - for metric in self.metrics: - if metric not in allowed_metrics: - raise KeyError( - "metric should be one of 'bbox', 'segm', 'proposal', " - f"'proposal_fast', but got {metric}.") - - # do class wise evaluation, default False - self.classwise = classwise - - # proposal_nums used to compute recall or precision. - self.proposal_nums = list(proposal_nums) - - # iou_thrs used to compute recall or precision. - if iou_thrs is None: - iou_thrs = np.linspace( - .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) - self.iou_thrs = iou_thrs - self.metric_items = metric_items - self.format_only = format_only - if self.format_only: - assert outfile_prefix is not None, 'outfile_prefix must be not' - 'None when format_only is True, otherwise the result files will' - 'be saved to a temp directory which will be cleaned up at the end.' - - self.outfile_prefix = outfile_prefix - self.backend_args = backend_args - if file_client_args is not None: - raise RuntimeError( - 'The `file_client_args` is deprecated, ' - 'please use `backend_args` instead, please refer to' - 'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501 - ) - - # if ann_file is not specified, - # initialize lvis api with the converted dataset - if ann_file is not None: - with get_local_path( - ann_file, backend_args=self.backend_args) as local_path: - self._lvis_api = LVIS(local_path) - else: - self._lvis_api = None - - # handle dataset lazy init - self.cat_ids = None - self.img_ids = None - - def fast_eval_recall(self, - results: List[dict], - proposal_nums: Sequence[int], - iou_thrs: Sequence[float], - logger: Optional[MMLogger] = None) -> np.ndarray: - """Evaluate proposal recall with LVIS's fast_eval_recall. - - Args: - results (List[dict]): Results of the dataset. - proposal_nums (Sequence[int]): Proposal numbers used for - evaluation. - iou_thrs (Sequence[float]): IoU thresholds used for evaluation. - logger (MMLogger, optional): Logger used for logging the recall - summary. - Returns: - np.ndarray: Averaged recall results. 
- """ - gt_bboxes = [] - pred_bboxes = [result['bboxes'] for result in results] - for i in range(len(self.img_ids)): - ann_ids = self._lvis_api.get_ann_ids(img_ids=[self.img_ids[i]]) - ann_info = self._lvis_api.load_anns(ann_ids) - if len(ann_info) == 0: - gt_bboxes.append(np.zeros((0, 4))) - continue - bboxes = [] - for ann in ann_info: - x1, y1, w, h = ann['bbox'] - bboxes.append([x1, y1, x1 + w, y1 + h]) - bboxes = np.array(bboxes, dtype=np.float32) - if bboxes.shape[0] == 0: - bboxes = np.zeros((0, 4)) - gt_bboxes.append(bboxes) - - recalls = eval_recalls( - gt_bboxes, pred_bboxes, proposal_nums, iou_thrs, logger=logger) - ar = recalls.mean(axis=1) - return ar - - # TODO: data_batch is no longer needed, consider adjusting the - # parameter position - def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: - """Process one batch of data samples and predictions. The processed - results should be stored in ``self.results``, which will be used to - compute the metrics when all batches have been processed. - - Args: - data_batch (dict): A batch of data from the dataloader. - data_samples (Sequence[dict]): A batch of data samples that - contain annotations and predictions. - """ - for data_sample in data_samples: - result = dict() - pred = data_sample['pred_instances'] - result['img_id'] = data_sample['img_id'] - result['bboxes'] = pred['bboxes'].cpu().numpy() - result['scores'] = pred['scores'].cpu().numpy() - result['labels'] = pred['labels'].cpu().numpy() - # encode mask to RLE - if 'masks' in pred: - result['masks'] = encode_mask_results( - pred['masks'].detach().cpu().numpy()) - # some detectors use different scores for bbox and mask - if 'mask_scores' in pred: - result['mask_scores'] = pred['mask_scores'].cpu().numpy() - - # parse gt - gt = dict() - gt['width'] = data_sample['ori_shape'][1] - gt['height'] = data_sample['ori_shape'][0] - gt['img_id'] = data_sample['img_id'] - if self._lvis_api is None: - # TODO: Need to refactor to support LoadAnnotations - assert 'instances' in data_sample, \ - 'ground truth is required for evaluation when ' \ - '`ann_file` is not provided' - gt['anns'] = data_sample['instances'] - # add converted result to the results list - self.results.append((gt, result)) - - def compute_metrics(self, results: list) -> Dict[str, float]: - """Compute the metrics from processed results. - - Args: - results (list): The processed results of each batch. - - Returns: - Dict[str, float]: The computed metrics. The keys are the names of - the metrics, and the values are corresponding results. 
- """ - logger: MMLogger = MMLogger.get_current_instance() - - # split gt and prediction list - gts, preds = zip(*results) - - tmp_dir = None - if self.outfile_prefix is None: - tmp_dir = tempfile.TemporaryDirectory() - outfile_prefix = osp.join(tmp_dir.name, 'results') - else: - outfile_prefix = self.outfile_prefix - - if self._lvis_api is None: - # use converted gt json file to initialize coco api - logger.info('Converting ground truth to coco format...') - coco_json_path = self.gt_to_coco_json( - gt_dicts=gts, outfile_prefix=outfile_prefix) - self._lvis_api = LVIS(coco_json_path) - - # handle lazy init - if self.cat_ids is None: - self.cat_ids = self._lvis_api.get_cat_ids() - if self.img_ids is None: - self.img_ids = self._lvis_api.get_img_ids() - - # convert predictions to coco format and dump to json file - result_files = self.results2json(preds, outfile_prefix) - - eval_results = OrderedDict() - if self.format_only: - logger.info('results are saved in ' - f'{osp.dirname(outfile_prefix)}') - return eval_results - - lvis_gt = self._lvis_api - - for metric in self.metrics: - logger.info(f'Evaluating {metric}...') - - # TODO: May refactor fast_eval_recall to an independent metric? - # fast eval recall - if metric == 'proposal_fast': - ar = self.fast_eval_recall( - preds, self.proposal_nums, self.iou_thrs, logger=logger) - log_msg = [] - for i, num in enumerate(self.proposal_nums): - eval_results[f'AR@{num}'] = ar[i] - log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') - log_msg = ''.join(log_msg) - logger.info(log_msg) - continue - - try: - lvis_dt = LVISResults(lvis_gt, result_files[metric]) - except IndexError: - logger.info( - 'The testing results of the whole dataset is empty.') - break - - iou_type = 'bbox' if metric == 'proposal' else metric - lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type) - lvis_eval.params.imgIds = self.img_ids - metric_items = self.metric_items - if metric == 'proposal': - lvis_eval.params.useCats = 0 - lvis_eval.params.maxDets = list(self.proposal_nums) - lvis_eval.evaluate() - lvis_eval.accumulate() - lvis_eval.summarize() - if metric_items is None: - metric_items = ['AR@300', 'ARs@300', 'ARm@300', 'ARl@300'] - for k, v in lvis_eval.get_results().items(): - if k in metric_items: - val = float('{:.3f}'.format(float(v))) - eval_results[k] = val - - else: - lvis_eval.evaluate() - lvis_eval.accumulate() - lvis_eval.summarize() - lvis_results = lvis_eval.get_results() - if self.classwise: # Compute per-category AP - # Compute per-category AP - # from https://github.com/facebookresearch/detectron2/ - precisions = lvis_eval.eval['precision'] - # precision: (iou, recall, cls, area range, max dets) - assert len(self.cat_ids) == precisions.shape[2] - - results_per_category = [] - for idx, catId in enumerate(self.cat_ids): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - # the dimensions of precisions are - # [num_thrs, num_recalls, num_cats, num_area_rngs] - nm = self._lvis_api.load_cats([catId])[0] - precision = precisions[:, :, idx, 0] - precision = precision[precision > -1] - if precision.size: - ap = np.mean(precision) - else: - ap = float('nan') - results_per_category.append( - (f'{nm["name"]}', f'{float(ap):0.3f}')) - eval_results[f'{nm["name"]}_precision'] = round(ap, 3) - - num_columns = min(6, len(results_per_category) * 2) - results_flatten = list( - itertools.chain(*results_per_category)) - headers = ['category', 'AP'] * (num_columns // 2) - results_2d = itertools.zip_longest(*[ - results_flatten[i::num_columns] - for i in 
range(num_columns) - ]) - table_data = [headers] - table_data += [result for result in results_2d] - table = AsciiTable(table_data) - logger.info('\n' + table.table) - - if metric_items is None: - metric_items = [ - 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'APr', - 'APc', 'APf' - ] - - for k, v in lvis_results.items(): - if k in metric_items: - key = '{}_{}'.format(metric, k) - val = float('{:.3f}'.format(float(v))) - eval_results[key] = val - - lvis_eval.print_results() - if tmp_dir is not None: - tmp_dir.cleanup() - return eval_results diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/train/process_ckpt.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/train/process_ckpt.py deleted file mode 100644 index 7926bba45a64db895d413138019ede985043b465..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/train/process_ckpt.py +++ /dev/null @@ -1,260 +0,0 @@ -import os -import traceback -from collections import OrderedDict - -import torch - -from assets.i18n.i18n import I18nAuto - -i18n = I18nAuto() - - -def savee(ckpt, sr, if_f0, name, epoch, version, hps): - try: - opt = OrderedDict() - opt["weight"] = {} - for key in ckpt.keys(): - if "enc_q" in key: - continue - opt["weight"][key] = ckpt[key].half() - opt["config"] = [ - hps.data.filter_length // 2 + 1, - 32, - hps.model.inter_channels, - hps.model.hidden_channels, - hps.model.filter_channels, - hps.model.n_heads, - hps.model.n_layers, - hps.model.kernel_size, - hps.model.p_dropout, - hps.model.resblock, - hps.model.resblock_kernel_sizes, - hps.model.resblock_dilation_sizes, - hps.model.upsample_rates, - hps.model.upsample_initial_channel, - hps.model.upsample_kernel_sizes, - hps.model.spk_embed_dim, - hps.model.gin_channels, - hps.data.sampling_rate, - ] - opt["info"] = "%sepoch" % epoch - opt["sr"] = sr - opt["f0"] = if_f0 - opt["version"] = version - torch.save(opt, "logs/weights/%s.pth" % name) - return "Success." 
- except: - return traceback.format_exc() - - -def show_info(path): - try: - a = torch.load(path, map_location="cpu") - return "模型信息:%s\n采样率:%s\n模型是否输入音高引导:%s\n版本:%s" % ( - a.get("info", "None"), - a.get("sr", "None"), - a.get("f0", "None"), - a.get("version", "None"), - ) - except: - return traceback.format_exc() - - -def extract_small_model(path, name, sr, if_f0, info, version): - try: - ckpt = torch.load(path, map_location="cpu") - if "model" in ckpt: - ckpt = ckpt["model"] - opt = OrderedDict() - opt["weight"] = {} - for key in ckpt.keys(): - if "enc_q" in key: - continue - opt["weight"][key] = ckpt[key].half() - if sr == "40k": - opt["config"] = [ - 1025, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 10, 2, 2], - 512, - [16, 16, 4, 4], - 109, - 256, - 40000, - ] - elif sr == "48k": - if version == "v1": - opt["config"] = [ - 1025, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 6, 2, 2, 2], - 512, - [16, 16, 4, 4, 4], - 109, - 256, - 48000, - ] - else: - opt["config"] = [ - 1025, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [12, 10, 2, 2], - 512, - [24, 20, 4, 4], - 109, - 256, - 48000, - ] - elif sr == "32k": - if version == "v1": - opt["config"] = [ - 513, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 4, 2, 2, 2], - 512, - [16, 16, 4, 4, 4], - 109, - 256, - 32000, - ] - else: - opt["config"] = [ - 513, - 32, - 192, - 192, - 768, - 2, - 6, - 3, - 0, - "1", - [3, 7, 11], - [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - [10, 8, 2, 2], - 512, - [20, 16, 4, 4], - 109, - 256, - 32000, - ] - if info == "": - info = "Extracted model." - opt["info"] = info - opt["version"] = version - opt["sr"] = sr - opt["f0"] = int(if_f0) - torch.save(opt, "logs/weights/%s.pth" % name) - return "Success." - except: - return traceback.format_exc() - - -def change_info(path, info, name): - try: - ckpt = torch.load(path, map_location="cpu") - ckpt["info"] = info - if name == "": - name = os.path.basename(path) - torch.save(ckpt, "logs/weights/%s" % name) - return "Success." - except: - return traceback.format_exc() - - -def merge(path1, path2, alpha1, sr, f0, info, name, version): - try: - - def extract(ckpt): - a = ckpt["model"] - opt = OrderedDict() - opt["weight"] = {} - for key in a.keys(): - if "enc_q" in key: - continue - opt["weight"][key] = a[key] - return opt - - ckpt1 = torch.load(path1, map_location="cpu") - ckpt2 = torch.load(path2, map_location="cpu") - cfg = ckpt1["config"] - if "model" in ckpt1: - ckpt1 = extract(ckpt1) - else: - ckpt1 = ckpt1["weight"] - if "model" in ckpt2: - ckpt2 = extract(ckpt2) - else: - ckpt2 = ckpt2["weight"] - if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())): - return "Fail to merge the models. The model architectures are not the same." 
- opt = OrderedDict() - opt["weight"] = {} - for key in ckpt1.keys(): - # try: - if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape: - min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0]) - opt["weight"][key] = ( - alpha1 * (ckpt1[key][:min_shape0].float()) - + (1 - alpha1) * (ckpt2[key][:min_shape0].float()) - ).half() - else: - opt["weight"][key] = ( - alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float()) - ).half() - # except: - # pdb.set_trace() - opt["config"] = cfg - """ - if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000] - elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000] - elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000] - """ - opt["sr"] = sr - opt["f0"] = 1 if f0 == i18n("是") else 0 - opt["version"] = version - opt["info"] = info - torch.save(opt, "logs/weights/%s.pth" % name) - return "Success." - except: - return traceback.format_exc() diff --git a/spaces/Lelliam/text_generator1/README.md b/spaces/Lelliam/text_generator1/README.md deleted file mode 100644 index 5ba839f023a2b3ffba1860867c2e2e3a2c5b00b8..0000000000000000000000000000000000000000 --- a/spaces/Lelliam/text_generator1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text Generator1 -emoji: 🏃 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Lianjd/stock_dashboard/backtrader/feeds/ibdata.py b/spaces/Lianjd/stock_dashboard/backtrader/feeds/ibdata.py deleted file mode 100644 index 6301f2c5c6c61ccfd41d897bd5df85a6c6658f9a..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/feeds/ibdata.py +++ /dev/null @@ -1,704 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import datetime - -import backtrader as bt -from backtrader.feed import DataBase -from backtrader import TimeFrame, date2num, num2date -from backtrader.utils.py3 import (integer_types, queue, string_types, - with_metaclass) -from backtrader.metabase import MetaParams -from backtrader.stores import ibstore - - -class MetaIBData(DataBase.__class__): - def __init__(cls, name, bases, dct): - '''Class has already been created ... register''' - # Initialize the class - super(MetaIBData, cls).__init__(name, bases, dct) - - # Register with the store - ibstore.IBStore.DataCls = cls - - -class IBData(with_metaclass(MetaIBData, DataBase)): - '''Interactive Brokers Data Feed. - - Supports the following contract specifications in parameter ``dataname``: - - - TICKER # Stock type and SMART exchange - - TICKER-STK # Stock and SMART exchange - - TICKER-STK-EXCHANGE # Stock - - TICKER-STK-EXCHANGE-CURRENCY # Stock - - - TICKER-CFD # CFD and SMART exchange - - TICKER-CFD-EXCHANGE # CFD - - TICKER-CDF-EXCHANGE-CURRENCY # Stock - - - TICKER-IND-EXCHANGE # Index - - TICKER-IND-EXCHANGE-CURRENCY # Index - - - TICKER-YYYYMM-EXCHANGE # Future - - TICKER-YYYYMM-EXCHANGE-CURRENCY # Future - - TICKER-YYYYMM-EXCHANGE-CURRENCY-MULT # Future - - TICKER-FUT-EXCHANGE-CURRENCY-YYYYMM-MULT # Future - - - TICKER-YYYYMM-EXCHANGE-CURRENCY-STRIKE-RIGHT # FOP - - TICKER-YYYYMM-EXCHANGE-CURRENCY-STRIKE-RIGHT-MULT # FOP - - TICKER-FOP-EXCHANGE-CURRENCY-YYYYMM-STRIKE-RIGHT # FOP - - TICKER-FOP-EXCHANGE-CURRENCY-YYYYMM-STRIKE-RIGHT-MULT # FOP - - - CUR1.CUR2-CASH-IDEALPRO # Forex - - - TICKER-YYYYMMDD-EXCHANGE-CURRENCY-STRIKE-RIGHT # OPT - - TICKER-YYYYMMDD-EXCHANGE-CURRENCY-STRIKE-RIGHT-MULT # OPT - - TICKER-OPT-EXCHANGE-CURRENCY-YYYYMMDD-STRIKE-RIGHT # OPT - - TICKER-OPT-EXCHANGE-CURRENCY-YYYYMMDD-STRIKE-RIGHT-MULT # OPT - - Params: - - - ``sectype`` (default: ``STK``) - - Default value to apply as *security type* if not provided in the - ``dataname`` specification - - - ``exchange`` (default: ``SMART``) - - Default value to apply as *exchange* if not provided in the - ``dataname`` specification - - - ``currency`` (default: ``''``) - - Default value to apply as *currency* if not provided in the - ``dataname`` specification - - - ``historical`` (default: ``False``) - - If set to ``True`` the data feed will stop after doing the first - download of data. - - The standard data feed parameters ``fromdate`` and ``todate`` will be - used as reference. - - The data feed will make multiple requests if the requested duration is - larger than the one allowed by IB given the timeframe/compression - chosen for the data. - - - ``what`` (default: ``None``) - - If ``None`` the default for different assets types will be used for - historical data requests: - - - 'BID' for CASH assets - - 'TRADES' for any other - - Use 'ASK' for the Ask quote of cash assets - - Check the IB API docs if another value is wished - - - ``rtbar`` (default: ``False``) - - If ``True`` the ``5 Seconds Realtime bars`` provided by Interactive - Brokers will be used as the smalles tick. According to the - documentation they correspond to real-time values (once collated and - curated by IB) - - If ``False`` then the ``RTVolume`` prices will be used, which are based - on receiving ticks. 
In the case of ``CASH`` assets (like for example - EUR.JPY) ``RTVolume`` will always be used and from it the ``bid`` price - (industry de-facto standard with IB according to the literature - scattered over the Internet) - - Even if set to ``True``, if the data is resampled/kept to a - timeframe/compression below Seconds/5, no real time bars will be used, - because IB doesn't serve them below that level - - - ``qcheck`` (default: ``0.5``) - - Time in seconds to wake up if no data is received to give a chance to - resample/replay packets properly and pass notifications up the chain - - - ``backfill_start`` (default: ``True``) - - Perform backfilling at the start. The maximum possible historical data - will be fetched in a single request. - - - ``backfill`` (default: ``True``) - - Perform backfilling after a disconnection/reconnection cycle. The gap - duration will be used to download the smallest possible amount of data - - - ``backfill_from`` (default: ``None``) - - An additional data source can be passed to do an initial layer of - backfilling. Once the data source is depleted and if requested, - backfilling from IB will take place. This is ideally meant to backfill - from already stored sources like a file on disk, but not limited to. - - - ``latethrough`` (default: ``False``) - - If the data source is resampled/replayed, some ticks may come in too - late for the already delivered resampled/replayed bar. If this is - ``True`` those ticks will bet let through in any case. - - Check the Resampler documentation to see who to take those ticks into - account. - - This can happen especially if ``timeoffset`` is set to ``False`` in - the ``IBStore`` instance and the TWS server time is not in sync with - that of the local computer - - - ``tradename`` (default: ``None``) - Useful for some specific cases like ``CFD`` in which prices are offered - by one asset and trading happens in a different onel - - - SPY-STK-SMART-USD -> SP500 ETF (will be specified as ``dataname``) - - - SPY-CFD-SMART-USD -> which is the corresponding CFD which offers not - price tracking but in this case will be the trading asset (specified - as ``tradename``) - - The default values in the params are the to allow things like ```TICKER``, - to which the parameter ``sectype`` (default: ``STK``) and ``exchange`` - (default: ``SMART``) are applied. - - Some assets like ``AAPL`` need full specification including ``currency`` - (default: '') whereas others like ``TWTR`` can be simply passed as it is. 
- - - ``AAPL-STK-SMART-USD`` would be the full specification for dataname - - Or else: ``IBData`` as ``IBData(dataname='AAPL', currency='USD')`` - which uses the default values (``STK`` and ``SMART``) and overrides - the currency to be ``USD`` - ''' - params = ( - ('sectype', 'STK'), # usual industry value - ('exchange', 'SMART'), # usual industry value - ('currency', ''), - ('rtbar', False), # use RealTime 5 seconds bars - ('historical', False), # only historical download - ('what', None), # historical - what to show - ('useRTH', False), # historical - download only Regular Trading Hours - ('qcheck', 0.5), # timeout in seconds (float) to check for events - ('backfill_start', True), # do backfilling at the start - ('backfill', True), # do backfilling when reconnecting - ('backfill_from', None), # additional data source to do backfill from - ('latethrough', False), # let late samples through - ('tradename', None), # use a different asset as order target - ) - - _store = ibstore.IBStore - - # Minimum size supported by real-time bars - RTBAR_MINSIZE = (TimeFrame.Seconds, 5) - - # States for the Finite State Machine in _load - _ST_FROM, _ST_START, _ST_LIVE, _ST_HISTORBACK, _ST_OVER = range(5) - - def _timeoffset(self): - return self.ib.timeoffset() - - def _gettz(self): - # If no object has been provided by the user and a timezone can be - # found via contractdtails, then try to get it from pytz, which may or - # may not be available. - - # The timezone specifications returned by TWS seem to be abbreviations - # understood by pytz, but the full list which TWS may return is not - # documented and one of the abbreviations may fail - tzstr = isinstance(self.p.tz, string_types) - if self.p.tz is not None and not tzstr: - return bt.utils.date.Localizer(self.p.tz) - - if self.contractdetails is None: - return None # nothing can be done - - try: - import pytz # keep the import very local - except ImportError: - return None # nothing can be done - - tzs = self.p.tz if tzstr else self.contractdetails.m_timeZoneId - - if tzs == 'CST': # reported by TWS, not compatible with pytz. 
patch it - tzs = 'CST6CDT' - - try: - tz = pytz.timezone(tzs) - except pytz.UnknownTimeZoneError: - return None # nothing can be done - - # contractdetails there, import ok, timezone found, return it - return tz - - def islive(self): - '''Returns ``True`` to notify ``Cerebro`` that preloading and runonce - should be deactivated''' - return not self.p.historical - - def __init__(self, **kwargs): - self.ib = self._store(**kwargs) - self.precontract = self.parsecontract(self.p.dataname) - self.pretradecontract = self.parsecontract(self.p.tradename) - - def setenvironment(self, env): - '''Receives an environment (cerebro) and passes it over to the store it - belongs to''' - super(IBData, self).setenvironment(env) - env.addstore(self.ib) - - def parsecontract(self, dataname): - '''Parses dataname generates a default contract''' - # Set defaults for optional tokens in the ticker string - if dataname is None: - return None - - exch = self.p.exchange - curr = self.p.currency - expiry = '' - strike = 0.0 - right = '' - mult = '' - - # split the ticker string - tokens = iter(dataname.split('-')) - - # Symbol and security type are compulsory - symbol = next(tokens) - try: - sectype = next(tokens) - except StopIteration: - sectype = self.p.sectype - - # security type can be an expiration date - if sectype.isdigit(): - expiry = sectype # save the expiration ate - - if len(sectype) == 6: # YYYYMM - sectype = 'FUT' - else: # Assume OPTIONS - YYYYMMDD - sectype = 'OPT' - - if sectype == 'CASH': # need to address currency for Forex - symbol, curr = symbol.split('.') - - # See if the optional tokens were provided - try: - exch = next(tokens) # on exception it will be the default - curr = next(tokens) # on exception it will be the default - - if sectype == 'FUT': - if not expiry: - expiry = next(tokens) - mult = next(tokens) - - # Try to see if this is FOP - Futures on OPTIONS - right = next(tokens) - # if still here this is a FOP and not a FUT - sectype = 'FOP' - strike, mult = float(mult), '' # assign to strike and void - - mult = next(tokens) # try again to see if there is any - - elif sectype == 'OPT': - if not expiry: - expiry = next(tokens) - strike = float(next(tokens)) # on exception - default - right = next(tokens) # on exception it will be the default - - mult = next(tokens) # ?? 
no harm in any case - - except StopIteration: - pass - - # Make the initial contract - precon = self.ib.makecontract( - symbol=symbol, sectype=sectype, exch=exch, curr=curr, - expiry=expiry, strike=strike, right=right, mult=mult) - - return precon - - def start(self): - '''Starts the IB connecction and gets the real contract and - contractdetails if it exists''' - super(IBData, self).start() - # Kickstart store and get queue to wait on - self.qlive = self.ib.start(data=self) - self.qhist = None - - self._usertvol = not self.p.rtbar - tfcomp = (self._timeframe, self._compression) - if tfcomp < self.RTBAR_MINSIZE: - # Requested timeframe/compression not supported by rtbars - self._usertvol = True - - self.contract = None - self.contractdetails = None - self.tradecontract = None - self.tradecontractdetails = None - - if self.p.backfill_from is not None: - self._state = self._ST_FROM - self.p.backfill_from.setenvironment(self._env) - self.p.backfill_from._start() - else: - self._state = self._ST_START # initial state for _load - self._statelivereconn = False # if reconnecting in live state - self._subcription_valid = False # subscription state - self._storedmsg = dict() # keep pending live message (under None) - - if not self.ib.connected(): - return - - self.put_notification(self.CONNECTED) - # get real contract details with real conId (contractId) - cds = self.ib.getContractDetails(self.precontract, maxcount=1) - if cds is not None: - cdetails = cds[0] - self.contract = cdetails.contractDetails.m_summary - self.contractdetails = cdetails.contractDetails - else: - # no contract can be found (or many) - self.put_notification(self.DISCONNECTED) - return - - if self.pretradecontract is None: - # no different trading asset - default to standard asset - self.tradecontract = self.contract - self.tradecontractdetails = self.contractdetails - else: - # different target asset (typical of some CDS products) - # use other set of details - cds = self.ib.getContractDetails(self.pretradecontract, maxcount=1) - if cds is not None: - cdetails = cds[0] - self.tradecontract = cdetails.contractDetails.m_summary - self.tradecontractdetails = cdetails.contractDetails - else: - # no contract can be found (or many) - self.put_notification(self.DISCONNECTED) - return - - if self._state == self._ST_START: - self._start_finish() # to finish initialization - self._st_start() - - def stop(self): - '''Stops and tells the store to stop''' - super(IBData, self).stop() - self.ib.stop() - - def reqdata(self): - '''request real-time data. 
checks cash vs non-cash) and param useRT''' - if self.contract is None or self._subcription_valid: - return - - if self._usertvol: - self.qlive = self.ib.reqMktData(self.contract, self.p.what) - else: - self.qlive = self.ib.reqRealTimeBars(self.contract) - - self._subcription_valid = True - return self.qlive - - def canceldata(self): - '''Cancels Market Data subscription, checking asset type and rtbar''' - if self.contract is None: - return - - if self._usertvol: - self.ib.cancelMktData(self.qlive) - else: - self.ib.cancelRealTimeBars(self.qlive) - - def haslivedata(self): - return bool(self._storedmsg or self.qlive) - - def _load(self): - if self.contract is None or self._state == self._ST_OVER: - return False # nothing can be done - - while True: - if self._state == self._ST_LIVE: - try: - msg = (self._storedmsg.pop(None, None) or - self.qlive.get(timeout=self._qcheck)) - except queue.Empty: - if True: - return None - - # Code invalidated until further checking is done - if not self._statelivereconn: - return None # indicate timeout situation - - # Awaiting data and nothing came in - fake it up until now - dtend = self.num2date(date2num(datetime.datetime.utcnow())) - dtbegin = None - if len(self) > 1: - dtbegin = self.num2date(self.datetime[-1]) - - self.qhist = self.ib.reqHistoricalDataEx( - contract=self.contract, - enddate=dtend, begindate=dtbegin, - timeframe=self._timeframe, - compression=self._compression, - what=self.p.what, useRTH=self.p.useRTH, tz=self._tz, - sessionend=self.p.sessionend) - - if self._laststatus != self.DELAYED: - self.put_notification(self.DELAYED) - - self._state = self._ST_HISTORBACK - - self._statelivereconn = False - continue # to reenter the loop and hit st_historback - - if msg is None: # Conn broken during historical/backfilling - self._subcription_valid = False - self.put_notification(self.CONNBROKEN) - # Try to reconnect - if not self.ib.reconnect(resub=True): - self.put_notification(self.DISCONNECTED) - return False # failed - - self._statelivereconn = self.p.backfill - continue - - if msg == -354: - self.put_notification(self.NOTSUBSCRIBED) - return False - - elif msg == -1100: # conn broken - # Tell to wait for a message to do a backfill - # self._state = self._ST_DISCONN - self._subcription_valid = False - self._statelivereconn = self.p.backfill - continue - - elif msg == -1102: # conn broken/restored tickerId maintained - # The message may be duplicated - if not self._statelivereconn: - self._statelivereconn = self.p.backfill - continue - - elif msg == -1101: # conn broken/restored tickerId gone - # The message may be duplicated - self._subcription_valid = False - if not self._statelivereconn: - self._statelivereconn = self.p.backfill - self.reqdata() # resubscribe - continue - - elif msg == -10225: # Bust event occurred, current subscription is deactivated. 
- self._subcription_valid = False - if not self._statelivereconn: - self._statelivereconn = self.p.backfill - self.reqdata() # resubscribe - continue - - elif isinstance(msg, integer_types): - # Unexpected notification for historical data skip it - # May be a "not connected not yet processed" - self.put_notification(self.UNKNOWN, msg) - continue - - # Process the message according to expected return type - if not self._statelivereconn: - if self._laststatus != self.LIVE: - if self.qlive.qsize() <= 1: # very short live queue - self.put_notification(self.LIVE) - - if self._usertvol: - ret = self._load_rtvolume(msg) - else: - ret = self._load_rtbar(msg) - if ret: - return True - - # could not load bar ... go and get new one - continue - - # Fall through to processing reconnect - try to backfill - self._storedmsg[None] = msg # keep the msg - - # else do a backfill - if self._laststatus != self.DELAYED: - self.put_notification(self.DELAYED) - - dtend = None - if len(self) > 1: - # len == 1 ... forwarded for the 1st time - # get begin date in utc-like format like msg.datetime - dtbegin = num2date(self.datetime[-1]) - elif self.fromdate > float('-inf'): - dtbegin = num2date(self.fromdate) - else: # 1st bar and no begin set - # passing None to fetch max possible in 1 request - dtbegin = None - - dtend = msg.datetime if self._usertvol else msg.time - - self.qhist = self.ib.reqHistoricalDataEx( - contract=self.contract, enddate=dtend, begindate=dtbegin, - timeframe=self._timeframe, compression=self._compression, - what=self.p.what, useRTH=self.p.useRTH, tz=self._tz, - sessionend=self.p.sessionend) - - self._state = self._ST_HISTORBACK - self._statelivereconn = False # no longer in live - continue - - elif self._state == self._ST_HISTORBACK: - msg = self.qhist.get() - if msg is None: # Conn broken during historical/backfilling - # Situation not managed. 
Simply bail out - self._subcription_valid = False - self.put_notification(self.DISCONNECTED) - return False # error management cancelled the queue - - elif msg == -354: # Data not subscribed - self._subcription_valid = False - self.put_notification(self.NOTSUBSCRIBED) - return False - - elif msg == -420: # No permissions for the data - self._subcription_valid = False - self.put_notification(self.NOTSUBSCRIBED) - return False - - elif isinstance(msg, integer_types): - # Unexpected notification for historical data skip it - # May be a "not connected not yet processed" - self.put_notification(self.UNKNOWN, msg) - continue - - if msg.date is not None: - if self._load_rtbar(msg, hist=True): - return True # loading worked - - # the date is from overlapping historical request - continue - - # End of histdata - if self.p.historical: # only historical - self.put_notification(self.DISCONNECTED) - return False # end of historical - - # Live is also wished - go for it - self._state = self._ST_LIVE - continue - - elif self._state == self._ST_FROM: - if not self.p.backfill_from.next(): - # additional data source is consumed - self._state = self._ST_START - continue - - # copy lines of the same name - for alias in self.lines.getlinealiases(): - lsrc = getattr(self.p.backfill_from.lines, alias) - ldst = getattr(self.lines, alias) - - ldst[0] = lsrc[0] - - return True - - elif self._state == self._ST_START: - if not self._st_start(): - return False - - def _st_start(self): - if self.p.historical: - self.put_notification(self.DELAYED) - dtend = None - if self.todate < float('inf'): - dtend = num2date(self.todate) - - dtbegin = None - if self.fromdate > float('-inf'): - dtbegin = num2date(self.fromdate) - - self.qhist = self.ib.reqHistoricalDataEx( - contract=self.contract, enddate=dtend, begindate=dtbegin, - timeframe=self._timeframe, compression=self._compression, - what=self.p.what, useRTH=self.p.useRTH, tz=self._tz, - sessionend=self.p.sessionend) - - self._state = self._ST_HISTORBACK - return True # continue before - - # Live is requested - if not self.ib.reconnect(resub=True): - self.put_notification(self.DISCONNECTED) - self._state = self._ST_OVER - return False # failed - was so - - self._statelivereconn = self.p.backfill_start - if self.p.backfill_start: - self.put_notification(self.DELAYED) - - self._state = self._ST_LIVE - return True # no return before - implicit continue - - def _load_rtbar(self, rtbar, hist=False): - # A complete 5 second bar made of real-time ticks is delivered and - # contains open/high/low/close/volume prices - # The historical data has the same data but with 'date' instead of - # 'time' for datetime - dt = date2num(rtbar.time if not hist else rtbar.date) - if dt < self.lines.datetime[-1] and not self.p.latethrough: - return False # cannot deliver earlier than already delivered - - self.lines.datetime[0] = dt - # Put the tick into the bar - self.lines.open[0] = rtbar.open - self.lines.high[0] = rtbar.high - self.lines.low[0] = rtbar.low - self.lines.close[0] = rtbar.close - self.lines.volume[0] = rtbar.volume - self.lines.openinterest[0] = 0 - - return True - - def _load_rtvolume(self, rtvol): - # A single tick is delivered and is therefore used for the entire set - # of prices. 
Ideally the - # contains open/high/low/close/volume prices - # Datetime transformation - dt = date2num(rtvol.datetime) - if dt < self.lines.datetime[-1] and not self.p.latethrough: - return False # cannot deliver earlier than already delivered - - self.lines.datetime[0] = dt - - # Put the tick into the bar - tick = rtvol.price - self.lines.open[0] = tick - self.lines.high[0] = tick - self.lines.low[0] = tick - self.lines.close[0] = tick - self.lines.volume[0] = rtvol.size - self.lines.openinterest[0] = 0 - - return True diff --git a/spaces/MWilinski/bot/app.py b/spaces/MWilinski/bot/app.py deleted file mode 100644 index 2e4dbe67dc6e47eae2c1e5538fbb0da53b0b7aa9..0000000000000000000000000000000000000000 --- a/spaces/MWilinski/bot/app.py +++ /dev/null @@ -1,33 +0,0 @@ -import gradio as gr -from api.config import Config -from api.logger import logger -from api.question_answering import QAModel - - -config = Config() -model = QAModel( - llm_model_id=config.question_answering_model_id, - embedding_model_id=config.embedding_model_id, - index_repo_id=config.index_repo_id, - use_docs_for_context=config.use_docs_for_context, - add_sources_to_response=config.add_sources_to_response, - use_messages_for_context=config.use_messages_in_context, - debug=config.debug -) - -with gr.Blocks() as demo: - chatbot = gr.Chatbot() - msg = gr.Textbox() - clear = gr.ClearButton([msg, chatbot]) - - def respond(message, chat_history): - context = "".join(f"User: {msg} \nBot:{bot_msg}\n" for msg, bot_msg in chat_history) - logger.info(f"Context: {context}") - response = model.get_response(message, context) - bot_message = response.get_answer() + response.get_sources_as_text() + "\n" - chat_history.append((message, bot_message)) - return "", chat_history - - msg.submit(respond, [msg, chatbot], [msg, chatbot]) - -demo.launch(share=False, debug=True) diff --git a/spaces/Maharaja36/MyVoiceAssistand/app.py b/spaces/Maharaja36/MyVoiceAssistand/app.py deleted file mode 100644 index ca8b6d40b4ab898c70da92f4a4298de2baf703dc..0000000000000000000000000000000000000000 --- a/spaces/Maharaja36/MyVoiceAssistand/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import os -import re -import requests -import json -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') -PLAY_HT_API_KEY=os.getenv('PLAY_HT_API_KEY') -PLAY_HT_USER_ID=os.getenv('PLAY_HT_USER_ID') - -PLAY_HT_VOICE_ID=os.getenv('PLAY_HT_VOICE_ID') -play_ht_api_get_audio_url = "https://play.ht/api/v2/tts" - - -template = """You are a helpful assistant to answer user queries. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -headers = { - "accept": "text/event-stream", - "content-type": "application/json", - "AUTHORIZATION": "Bearer "+ PLAY_HT_API_KEY, - "X-USER-ID": PLAY_HT_USER_ID -} - - -def get_payload(text): - return { - "text": text, - "voice": PLAY_HT_VOICE_ID, - "quality": "medium", - "output_format": "mp3", - "speed": 1, - "sample_rate": 24000, - "seed": None, - "temperature": None - } - -def get_generated_audio(text): - payload = get_payload(text) - generated_response = {} - try: - response = requests.post(play_ht_api_get_audio_url, json=payload, headers=headers) - response.raise_for_status() - generated_response["type"]= 'SUCCESS' - generated_response["response"] = response.text - except requests.exceptions.RequestException as e: - generated_response["type"]= 'ERROR' - try: - response_text = json.loads(response.text) - if response_text['error_message']: - generated_response["response"] = response_text['error_message'] - else: - generated_response["response"] = response.text - except Exception as e: - generated_response["response"] = response.text - except Exception as e: - generated_response["type"]= 'ERROR' - generated_response["response"] = response.text - return generated_response - -def extract_urls(text): - # Define the regex pattern for URLs - url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+[/\w\.-]*' - - # Find all occurrences of URLs in the text - urls = re.findall(url_pattern, text) - - return urls - -def get_audio_reply_for_question(text): - generated_audio_event = get_generated_audio(text) - #From get_generated_audio, you will get events in a string format, from that we need to extract the url - final_response = { - "audio_url": '', - "message": '' - } - if generated_audio_event["type"] == 'SUCCESS': - audio_urls = extract_urls(generated_audio_event["response"]) - if len(audio_urls) == 0: - final_response['message'] = "No audio file link found in generated event" - else: - final_response['audio_url'] = audio_urls[-1] - else: - final_response['message'] = generated_audio_event['response'] - return final_response - -def download_url(url): - try: - # Send a GET request to the URL to fetch the content - final_response = { - 'content':'', - 'error':'' - } - response = requests.get(url) - # Check if the request was successful (status code 200) - if response.status_code == 200: - final_response['content'] = response.content - else: - final_response['error'] = f"Failed to download the URL. Status code: {response.status_code}" - except Exception as e: - final_response['error'] = f"Failed to download the URL. 
Error: {e}" - return final_response - -def get_filename_from_url(url): - # Use os.path.basename() to extract the file name from the URL - file_name = os.path.basename(url) - return file_name - -def get_text_response(user_message): - response = llm_chain.predict(user_message = user_message) - return response - -def get_text_response_and_audio_response(user_message): - response = get_text_response(user_message) # Getting the reply from Open AI - audio_reply_for_question_response = get_audio_reply_for_question(response) - final_response = { - 'output_file_path': '', - 'message':'' - } - audio_url = audio_reply_for_question_response['audio_url'] - if audio_url: - output_file_path=get_filename_from_url(audio_url) - download_url_response = download_url(audio_url) - audio_content = download_url_response['content'] - if audio_content: - with open(output_file_path, "wb") as audio_file: - audio_file.write(audio_content) - final_response['output_file_path'] = output_file_path - else: - final_response['message'] = download_url_response['error'] - else: - final_response['message'] = audio_reply_for_question_response['message'] - return final_response - -def chat_bot_response(message, history): - text_and_audio_response = get_text_response_and_audio_response(message) - output_file_path = text_and_audio_response['output_file_path'] - if output_file_path: - return (text_and_audio_response['output_file_path'],) - else: - return text_and_audio_response['message'] - -demo = gr.ChatInterface(chat_bot_response,examples=["How are you doing?","What are your interests?","Which places do you like to visit?"]) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/util/get_tokenlizer.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/util/get_tokenlizer.py deleted file mode 100644 index f7dcf7e95f03f95b20546b26442a94225924618b..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/util/get_tokenlizer.py +++ /dev/null @@ -1,26 +0,0 @@ -from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast - - -def get_tokenlizer(text_encoder_type): - if not isinstance(text_encoder_type, str): - # print("text_encoder_type is not a str") - if hasattr(text_encoder_type, "text_encoder_type"): - text_encoder_type = text_encoder_type.text_encoder_type - elif text_encoder_type.get("text_encoder_type", False): - text_encoder_type = text_encoder_type.get("text_encoder_type") - else: - raise ValueError( - "Unknown type of text_encoder_type: {}".format(type(text_encoder_type)) - ) - print("final text_encoder_type: {}".format(text_encoder_type)) - - tokenizer = AutoTokenizer.from_pretrained(text_encoder_type) - return tokenizer - - -def get_pretrained_language_model(text_encoder_type): - if text_encoder_type == "bert-base-uncased": - return BertModel.from_pretrained(text_encoder_type) - if text_encoder_type == "roberta-base": - return RobertaModel.from_pretrained(text_encoder_type) - raise ValueError("Unknown text_encoder_type {}".format(text_encoder_type)) diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/models/dnl_r50-d8.py 
b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/models/dnl_r50-d8.py deleted file mode 100644 index edb4c174c51e34c103737ba39bfc48bf831e561d..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/models/dnl_r50-d8.py +++ /dev/null @@ -1,46 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='DNLHead', - in_channels=2048, - in_index=3, - channels=512, - dropout_ratio=0.1, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/Mikey211/Project/README.md b/spaces/Mikey211/Project/README.md deleted file mode 100644 index 7fffc6a9ffd4c19661b55b5900f460ef6475f2f5..0000000000000000000000000000000000000000 --- a/spaces/Mikey211/Project/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Project -emoji: 📉 -colorFrom: indigo -colorTo: pink -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/_base_/datasets/icdar2015.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/_base_/datasets/icdar2015.py deleted file mode 100644 index 958cb4fa17f50ed7dc967ccceb11cfb9426cd867..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/_base_/datasets/icdar2015.py +++ /dev/null @@ -1,15 +0,0 @@ -icdar2015_textdet_data_root = 'data/icdar2015' - -icdar2015_textdet_train = dict( - type='OCRDataset', - data_root=icdar2015_textdet_data_root, - ann_file='textdet_train.json', - filter_cfg=dict(filter_empty_gt=True, min_size=32), - pipeline=None) - -icdar2015_textdet_test = dict( - type='OCRDataset', - data_root=icdar2015_textdet_data_root, - ann_file='textdet_test.json', - test_mode=True, - pipeline=None) diff --git a/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/templates.py b/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/templates.py deleted file mode 100644 index 036bb02bbc7a0bc4ae4614dc5bf528403ddbedd0..0000000000000000000000000000000000000000 --- a/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/templates.py +++ /dev/null @@ -1,44 +0,0 @@ -css = ''' -' - - -def get_h2o_title(title, description): - # NOTE: Check full width desktop, smallest width browser desktop, iPhone browsers to ensure no overlap etc. - return f"""
- {description} -
-
-
{h2o_logo}
-

{title}

-
-
- -
- """ - - -def get_simple_title(title, description): - return f"""{description}

{title}

""" - - -def get_dark_js() -> str: - return """ - if (document.querySelectorAll('.dark').length) { - document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark')); - } else { - document.querySelector('body').classList.add('dark'); - } - """ - - -def get_heap_js(heapAppId: str) -> str: - return ( - """globalThis.window.heap=window.heap||[],heap.load=function(e,t){window.heap.appid=e,window.heap.config=t=t||{};var r=document.createElement("script");r.type="text/javascript",r.async=!0,r.src="https://cdn.heapanalytics.com/js/heap-"+e+".js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(r,a);for(var n=function(e){return function(){heap.push([e].concat(Array.prototype.slice.call(arguments,0)))}},p=["addEventProperties","addUserProperties","clearEventProperties","identify","resetIdentity","removeEventProperty","setEventProperties","track","unsetEventProperty"],o=0;o str: - """ - Generates a JS code representing JS lambda that wraps all given '*args' code strings. - The lambda function has number of parameters based on 'num_params' and returns them - without modification in an array. Lambda with zero parameters returns an empty array. - """ - params = ", ".join([f"p{i}" for i in range(num_params)]) - newline = "\n" - return f""" - ({params}) => {{ - {newline.join([a for a in args if a is not None])} - return [{params}]; - }} - """ diff --git a/spaces/autosummproject/autosumm/corpora/__init__.py b/spaces/autosummproject/autosumm/corpora/__init__.py deleted file mode 100644 index f795550b16ffbb9c21a78ac6fd075d4129b47b94..0000000000000000000000000000000000000000 --- a/spaces/autosummproject/autosumm/corpora/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .corpora import gen_corpus diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/config/Start Stable Diffusion UI.cmd b/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/config/Start Stable Diffusion UI.cmd deleted file mode 100644 index 4f67c7bfe02d78ed61f1f5d760d28cbd4fc5a0c9..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/ui/config/Start Stable Diffusion UI.cmd +++ /dev/null @@ -1 +0,0 @@ -installer\Scripts\activate.bat diff --git a/spaces/awacke1/Streamlit-Clipboard-Monitor-Javascript/app.py b/spaces/awacke1/Streamlit-Clipboard-Monitor-Javascript/app.py deleted file mode 100644 index c0a75116644a7d1debec4f40ab8128a9e37a2067..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Streamlit-Clipboard-Monitor-Javascript/app.py +++ /dev/null @@ -1,56 +0,0 @@ -import streamlit as st -import streamlit_ace as ace -import base64 -import os -import time - -def get_clipboard_html(): - script_path = os.path.abspath(__file__) - script_dir = os.path.dirname(script_path) - clipboard_path = os.path.join(script_dir, "clipboard.html") - - with open(clipboard_path, "r") as f: - content = f.read() - b64 = base64.b64encode(content.encode()).decode() - return f"data:text/html;base64,{b64}" - -def main(): - st.title("Clipboard Monitor") - - st.write("Paste your data in the input box below:") - - clipboard_data = st.empty() - data_type = st.empty() - data_contents = st.empty() - - st.write(f'', unsafe_allow_html=True) - - # Set up session state - if "last_clipboard" not in st.session_state: - st.session_state.last_clipboard = "" - - while True: - event = st.experimental_get_query_params().get("event", [None])[0] - if event == "clipboard-update": - st.experimental_set_query_params(event="") - clipboard_text = st.experimental_get_query_params().get("data", [""])[0] - clipboard_text = 
base64.urlsafe_b64decode(clipboard_text).decode(errors="ignore") - - if clipboard_text != st.session_state.last_clipboard: - st.session_state.last_clipboard = clipboard_text - clipboard_data.write(f"Clipboard Data: {clipboard_text}") - - if clipboard_text.startswith("{") and clipboard_text.endswith("}"): - data_type.write("Data Type: JSON") - elif clipboard_text.startswith("<") and clipboard_text.endswith(">"): - data_type.write("Data Type: XML/HTML") - else: - data_type.write("Data Type: Plain Text") - - data_contents.write(f"Data Contents: {clipboard_text}") - - time.sleep(1) - st.experimental_rerun() - -if __name__ == "__main__": - main() diff --git a/spaces/awacke1/runwayml-stable-diffusion-v1-5-06212023/README.md b/spaces/awacke1/runwayml-stable-diffusion-v1-5-06212023/README.md deleted file mode 100644 index 71ff2babce42ed4f9ede19d6146faaf7c4fc36cb..0000000000000000000000000000000000000000 --- a/spaces/awacke1/runwayml-stable-diffusion-v1-5-06212023/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Runwayml Stable Diffusion V1 5 06212023 -emoji: 👁 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/axart-software/simple-beat-generator/app.py b/spaces/axart-software/simple-beat-generator/app.py deleted file mode 100644 index 9873ef6bf4bd03dd9bc137416f278d438a1cf820..0000000000000000000000000000000000000000 --- a/spaces/axart-software/simple-beat-generator/app.py +++ /dev/null @@ -1,56 +0,0 @@ -import gradio as gr -from beatgenerator import BeatGenerator -from datetime import datetime -from transformers import GPT2LMHeadModel, GPT2Tokenizer - -STEP_COUNT = 32 -INSTRUMENT_COUNT = 9 - -model = GPT2LMHeadModel.from_pretrained("./model") -tokenizer = GPT2Tokenizer.from_pretrained("./tokenizer") -tokenizer.pad_token = tokenizer.eos_token -beat_generator = BeatGenerator(model=model, tokenizer=tokenizer) - -def on_submit(*grid_rows) -> [str]: - step_data_container = [] - - for grid_row_id in range(INSTRUMENT_COUNT): - grid_row_as_ints = list(map(lambda x: int(x) - 1, grid_rows[grid_row_id])) - step_data_container.append(grid_row_as_ints) - - temperature: float = grid_rows[9] - tempo: int = grid_rows[10] - now = datetime.now() - date_string = now.strftime("%Y-%m-%d_%H-%M") - - genre, midi_data = beat_generator.generate_beat(user_prompt=step_data_container, temperature=temperature, tempo=tempo) - - return ["""

Genre: {0}


""".format(genre, midi_data, date_string)] - -checkbox_rows = [ - ["{:02d}".format(col + 1) for col in range(STEP_COUNT)] for _ in range(INSTRUMENT_COUNT) -] - -inputs = [ - gr.inputs.CheckboxGroup(checkbox_rows[0], label=f"Kick"), - gr.inputs.CheckboxGroup(checkbox_rows[1], label=f"Snare"), - gr.inputs.CheckboxGroup(checkbox_rows[2], label=f"Clap"), - gr.inputs.CheckboxGroup(checkbox_rows[2], label=f"Hat"), - gr.inputs.CheckboxGroup(checkbox_rows[2], label=f"L tom"), - gr.inputs.CheckboxGroup(checkbox_rows[2], label=f"Open hat"), - gr.inputs.CheckboxGroup(checkbox_rows[2], label=f"M tom"), - gr.inputs.CheckboxGroup(checkbox_rows[2], label=f"Crash cymbal"), - gr.inputs.CheckboxGroup(checkbox_rows[2], label=f"Ride cymbal"), - gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.7, label="Temperature"), - gr.inputs.Slider(minimum=60, maximum=200, step=1, default=120, label="Tempo") -] - -iface = gr.Interface( - fn=on_submit, - inputs=inputs, - outputs=["html"], - title="Simple (MIDI) Beat Generator", - description="A simple beat generator that creates an 8-bar MIDI beats on every run, based on a 32-step (2 bars) prompt in the form of a step sequencer. The generator uses a small fine-tuned GPT-2 model to recognise the genre (currently only Trap and Deep House) and generate the beat." -) - -iface.launch() \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/DRACOLoader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/DRACOLoader.js deleted file mode 100644 index 57767d78e20ff874366470e97b6445534ad53055..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/DRACOLoader.js +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2016 The Draco Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -'use strict'; - -/** - * @param {THREE.LoadingManager} manager - */ -THREE.DRACOLoader = function(manager) { - this.timeLoaded = 0; - this.manager = manager || THREE.DefaultLoadingManager; - this.materials = null; - this.verbosity = 0; - this.attributeOptions = {}; - this.drawMode = THREE.TrianglesDrawMode; - // Native Draco attribute type to Three.JS attribute type. - this.nativeAttributeMap = { - 'position' : 'POSITION', - 'normal' : 'NORMAL', - 'color' : 'COLOR', - 'uv' : 'TEX_COORD' - }; -}; - -THREE.DRACOLoader.prototype = { - - constructor: THREE.DRACOLoader, - - load: function(url, onLoad, onProgress, onError) { - var scope = this; - var loader = new THREE.FileLoader(scope.manager); - loader.setPath(this.path); - loader.setResponseType('arraybuffer'); - loader.load(url, function(blob) { - scope.decodeDracoFile(blob, onLoad); - }, onProgress, onError); - }, - - setPath: function(value) { - this.path = value; - return this; - }, - - setVerbosity: function(level) { - this.verbosity = level; - return this; - }, - - /** - * Sets desired mode for generated geometry indices. 
- * Can be either: - * THREE.TrianglesDrawMode - * THREE.TriangleStripDrawMode - */ - setDrawMode: function(drawMode) { - this.drawMode = drawMode; - return this; - }, - - /** - * Skips dequantization for a specific attribute. - * |attributeName| is the THREE.js name of the given attribute type. - * The only currently supported |attributeName| is 'position', more may be - * added in future. - */ - setSkipDequantization: function(attributeName, skip) { - var skipDequantization = true; - if (typeof skip !== 'undefined') - skipDequantization = skip; - this.getAttributeOptions(attributeName).skipDequantization = - skipDequantization; - return this; - }, - - /** - * Decompresses a Draco buffer. Names of attributes (for ID and type maps) - * must be one of the supported three.js types, including: position, color, - * normal, uv, uv2, skinIndex, skinWeight. - * - * @param {ArrayBuffer} rawBuffer - * @param {Function} callback - * @param {Object|undefined} attributeUniqueIdMap Provides a pre-defined ID - * for each attribute in the geometry to be decoded. If given, - * `attributeTypeMap` is required and `nativeAttributeMap` will be - * ignored. - * @param {Object|undefined} attributeTypeMap Provides a predefined data - * type (as a typed array constructor) for each attribute in the - * geometry to be decoded. - */ - decodeDracoFile: function(rawBuffer, callback, attributeUniqueIdMap, - attributeTypeMap) { - var scope = this; - THREE.DRACOLoader.getDecoderModule() - .then( function ( module ) { - scope.decodeDracoFileInternal( rawBuffer, module.decoder, callback, - attributeUniqueIdMap, attributeTypeMap); - }); - }, - - decodeDracoFileInternal: function(rawBuffer, dracoDecoder, callback, - attributeUniqueIdMap, attributeTypeMap) { - /* - * Here is how to use Draco Javascript decoder and get the geometry. - */ - var buffer = new dracoDecoder.DecoderBuffer(); - buffer.Init(new Int8Array(rawBuffer), rawBuffer.byteLength); - var decoder = new dracoDecoder.Decoder(); - - /* - * Determine what type is this file: mesh or point cloud. 
- */ - var geometryType = decoder.GetEncodedGeometryType(buffer); - if (geometryType == dracoDecoder.TRIANGULAR_MESH) { - if (this.verbosity > 0) { - console.log('Loaded a mesh.'); - } - } else if (geometryType == dracoDecoder.POINT_CLOUD) { - if (this.verbosity > 0) { - console.log('Loaded a point cloud.'); - } - } else { - var errorMsg = 'THREE.DRACOLoader: Unknown geometry type.'; - console.error(errorMsg); - throw new Error(errorMsg); - } - callback(this.convertDracoGeometryTo3JS(dracoDecoder, decoder, - geometryType, buffer, attributeUniqueIdMap, attributeTypeMap)); - }, - - addAttributeToGeometry: function(dracoDecoder, decoder, dracoGeometry, - attributeName, attributeType, attribute, - geometry, geometryBuffer) { - if (attribute.ptr === 0) { - var errorMsg = 'THREE.DRACOLoader: No attribute ' + attributeName; - console.error(errorMsg); - throw new Error(errorMsg); - } - - var numComponents = attribute.num_components(); - var numPoints = dracoGeometry.num_points(); - var numValues = numPoints * numComponents; - var attributeData; - var TypedBufferAttribute; - - switch ( attributeType ) { - - case Float32Array: - attributeData = new dracoDecoder.DracoFloat32Array(); - decoder.GetAttributeFloatForAllPoints( - dracoGeometry, attribute, attributeData); - geometryBuffer[ attributeName ] = new Float32Array( numValues ); - TypedBufferAttribute = THREE.Float32BufferAttribute; - break; - - case Int8Array: - attributeData = new dracoDecoder.DracoInt8Array(); - decoder.GetAttributeInt8ForAllPoints( - dracoGeometry, attribute, attributeData ); - geometryBuffer[ attributeName ] = new Int8Array( numValues ); - TypedBufferAttribute = THREE.Int8BufferAttribute; - break; - - case Int16Array: - attributeData = new dracoDecoder.DracoInt16Array(); - decoder.GetAttributeInt16ForAllPoints( - dracoGeometry, attribute, attributeData); - geometryBuffer[ attributeName ] = new Int16Array( numValues ); - TypedBufferAttribute = THREE.Int16BufferAttribute; - break; - - case Int32Array: - attributeData = new dracoDecoder.DracoInt32Array(); - decoder.GetAttributeInt32ForAllPoints( - dracoGeometry, attribute, attributeData); - geometryBuffer[ attributeName ] = new Int32Array( numValues ); - TypedBufferAttribute = THREE.Int32BufferAttribute; - break; - - case Uint8Array: - attributeData = new dracoDecoder.DracoUInt8Array(); - decoder.GetAttributeUInt8ForAllPoints( - dracoGeometry, attribute, attributeData); - geometryBuffer[ attributeName ] = new Uint8Array( numValues ); - TypedBufferAttribute = THREE.Uint8BufferAttribute; - break; - - case Uint16Array: - attributeData = new dracoDecoder.DracoUInt16Array(); - decoder.GetAttributeUInt16ForAllPoints( - dracoGeometry, attribute, attributeData); - geometryBuffer[ attributeName ] = new Uint16Array( numValues ); - TypedBufferAttribute = THREE.Uint16BufferAttribute; - break; - - case Uint32Array: - attributeData = new dracoDecoder.DracoUInt32Array(); - decoder.GetAttributeUInt32ForAllPoints( - dracoGeometry, attribute, attributeData); - geometryBuffer[ attributeName ] = new Uint32Array( numValues ); - TypedBufferAttribute = THREE.Uint32BufferAttribute; - break; - - default: - var errorMsg = 'THREE.DRACOLoader: Unexpected attribute type.'; - console.error( errorMsg ); - throw new Error( errorMsg ); - - } - - // Copy data from decoder. - for (var i = 0; i < numValues; i++) { - geometryBuffer[attributeName][i] = attributeData.GetValue(i); - } - // Add attribute to THREEJS geometry for rendering. 
- geometry.addAttribute(attributeName, - new TypedBufferAttribute(geometryBuffer[attributeName], - numComponents)); - dracoDecoder.destroy(attributeData); - }, - - convertDracoGeometryTo3JS: function(dracoDecoder, decoder, geometryType, - buffer, attributeUniqueIdMap, - attributeTypeMap) { - // TODO: Should not assume native Draco attribute IDs apply. - if (this.getAttributeOptions('position').skipDequantization === true) { - decoder.SkipAttributeTransform(dracoDecoder.POSITION); - } - var dracoGeometry; - var decodingStatus; - var start_time = performance.now(); - if (geometryType === dracoDecoder.TRIANGULAR_MESH) { - dracoGeometry = new dracoDecoder.Mesh(); - decodingStatus = decoder.DecodeBufferToMesh(buffer, dracoGeometry); - } else { - dracoGeometry = new dracoDecoder.PointCloud(); - decodingStatus = - decoder.DecodeBufferToPointCloud(buffer, dracoGeometry); - } - if (!decodingStatus.ok() || dracoGeometry.ptr == 0) { - var errorMsg = 'THREE.DRACOLoader: Decoding failed: '; - errorMsg += decodingStatus.error_msg(); - console.error(errorMsg); - dracoDecoder.destroy(decoder); - dracoDecoder.destroy(dracoGeometry); - throw new Error(errorMsg); - } - - var decode_end = performance.now(); - dracoDecoder.destroy(buffer); - /* - * Example on how to retrieve mesh and attributes. - */ - var numFaces; - if (geometryType == dracoDecoder.TRIANGULAR_MESH) { - numFaces = dracoGeometry.num_faces(); - if (this.verbosity > 0) { - console.log('Number of faces loaded: ' + numFaces.toString()); - } - } else { - numFaces = 0; - } - - var numPoints = dracoGeometry.num_points(); - var numAttributes = dracoGeometry.num_attributes(); - if (this.verbosity > 0) { - console.log('Number of points loaded: ' + numPoints.toString()); - console.log('Number of attributes loaded: ' + - numAttributes.toString()); - } - - // Verify if there is position attribute. - // TODO: Should not assume native Draco attribute IDs apply. - var posAttId = decoder.GetAttributeId(dracoGeometry, - dracoDecoder.POSITION); - if (posAttId == -1) { - var errorMsg = 'THREE.DRACOLoader: No position attribute found.'; - console.error(errorMsg); - dracoDecoder.destroy(decoder); - dracoDecoder.destroy(dracoGeometry); - throw new Error(errorMsg); - } - var posAttribute = decoder.GetAttribute(dracoGeometry, posAttId); - - // Structure for converting to THREEJS geometry later. - var geometryBuffer = {}; - // Import data to Three JS geometry. - var geometry = new THREE.BufferGeometry(); - - // Do not use both the native attribute map and a provided (e.g. glTF) map. - if ( attributeUniqueIdMap ) { - - // Add attributes of user specified unique id. E.g. GLTF models. - for (var attributeName in attributeUniqueIdMap) { - var attributeType = attributeTypeMap[attributeName]; - var attributeId = attributeUniqueIdMap[attributeName]; - var attribute = decoder.GetAttributeByUniqueId(dracoGeometry, - attributeId); - this.addAttributeToGeometry(dracoDecoder, decoder, dracoGeometry, - attributeName, attributeType, attribute, geometry, geometryBuffer); - } - - } else { - - // Add native Draco attribute type to geometry. 
- for (var attributeName in this.nativeAttributeMap) { - var attId = decoder.GetAttributeId(dracoGeometry, - dracoDecoder[this.nativeAttributeMap[attributeName]]); - if (attId !== -1) { - if (this.verbosity > 0) { - console.log('Loaded ' + attributeName + ' attribute.'); - } - var attribute = decoder.GetAttribute(dracoGeometry, attId); - this.addAttributeToGeometry(dracoDecoder, decoder, dracoGeometry, - attributeName, Float32Array, attribute, geometry, geometryBuffer); - } - } - - } - - // For mesh, we need to generate the faces. - if (geometryType == dracoDecoder.TRIANGULAR_MESH) { - if (this.drawMode === THREE.TriangleStripDrawMode) { - var stripsArray = new dracoDecoder.DracoInt32Array(); - var numStrips = decoder.GetTriangleStripsFromMesh( - dracoGeometry, stripsArray); - geometryBuffer.indices = new Uint32Array(stripsArray.size()); - for (var i = 0; i < stripsArray.size(); ++i) { - geometryBuffer.indices[i] = stripsArray.GetValue(i); - } - dracoDecoder.destroy(stripsArray); - } else { - var numIndices = numFaces * 3; - geometryBuffer.indices = new Uint32Array(numIndices); - var ia = new dracoDecoder.DracoInt32Array(); - for (var i = 0; i < numFaces; ++i) { - decoder.GetFaceFromMesh(dracoGeometry, i, ia); - var index = i * 3; - geometryBuffer.indices[index] = ia.GetValue(0); - geometryBuffer.indices[index + 1] = ia.GetValue(1); - geometryBuffer.indices[index + 2] = ia.GetValue(2); - } - dracoDecoder.destroy(ia); - } - } - - geometry.drawMode = this.drawMode; - if (geometryType == dracoDecoder.TRIANGULAR_MESH) { - geometry.setIndex(new(geometryBuffer.indices.length > 65535 ? - THREE.Uint32BufferAttribute : THREE.Uint16BufferAttribute) - (geometryBuffer.indices, 1)); - } - - // TODO: Should not assume native Draco attribute IDs apply. - // TODO: Can other attribute types be quantized? - var posTransform = new dracoDecoder.AttributeQuantizationTransform(); - if (posTransform.InitFromAttribute(posAttribute)) { - // Quantized attribute. Store the quantization parameters into the - // THREE.js attribute. - geometry.attributes['position'].isQuantized = true; - geometry.attributes['position'].maxRange = posTransform.range(); - geometry.attributes['position'].numQuantizationBits = - posTransform.quantization_bits(); - geometry.attributes['position'].minValues = new Float32Array(3); - for (var i = 0; i < 3; ++i) { - geometry.attributes['position'].minValues[i] = - posTransform.min_value(i); - } - } - dracoDecoder.destroy(posTransform); - dracoDecoder.destroy(decoder); - dracoDecoder.destroy(dracoGeometry); - - this.decode_time = decode_end - start_time; - this.import_time = performance.now() - decode_end; - - if (this.verbosity > 0) { - console.log('Decode time: ' + this.decode_time); - console.log('Import time: ' + this.import_time); - } - return geometry; - }, - - isVersionSupported: function(version, callback) { - THREE.DRACOLoader.getDecoderModule() - .then( function ( module ) { - callback( module.decoder.isVersionSupported( version ) ); - }); - }, - - getAttributeOptions: function(attributeName) { - if (typeof this.attributeOptions[attributeName] === 'undefined') - this.attributeOptions[attributeName] = {}; - return this.attributeOptions[attributeName]; - } -}; - -THREE.DRACOLoader.decoderPath = './'; -THREE.DRACOLoader.decoderConfig = {}; -THREE.DRACOLoader.decoderModulePromise = null; - -/** - * Sets the base path for decoder source files. 
- * @param {string} path - */ -THREE.DRACOLoader.setDecoderPath = function ( path ) { - THREE.DRACOLoader.decoderPath = path; -}; - -/** - * Sets decoder configuration and releases singleton decoder module. Module - * will be recreated with the next decoding call. - * @param {Object} config - */ -THREE.DRACOLoader.setDecoderConfig = function ( config ) { - var wasmBinary = THREE.DRACOLoader.decoderConfig.wasmBinary; - THREE.DRACOLoader.decoderConfig = config || {}; - THREE.DRACOLoader.releaseDecoderModule(); - - // Reuse WASM binary. - if ( wasmBinary ) THREE.DRACOLoader.decoderConfig.wasmBinary = wasmBinary; -}; - -/** - * Releases the singleton DracoDecoderModule instance. Module will be recreated - * with the next decoding call. - */ -THREE.DRACOLoader.releaseDecoderModule = function () { - THREE.DRACOLoader.decoderModulePromise = null; -}; - -/** - * Gets WebAssembly or asm.js singleton instance of DracoDecoderModule - * after testing for browser support. Returns Promise that resolves when - * module is available. - * @return {Promise<{decoder: DracoDecoderModule}>} - */ -THREE.DRACOLoader.getDecoderModule = function () { - var scope = this; - var path = THREE.DRACOLoader.decoderPath; - var config = THREE.DRACOLoader.decoderConfig; - var promise = THREE.DRACOLoader.decoderModulePromise; - - if ( promise ) return promise; - - // Load source files. - if ( typeof DracoDecoderModule !== 'undefined' ) { - // Loaded externally. - promise = Promise.resolve(); - } else if ( typeof WebAssembly !== 'object' || config.type === 'js' ) { - // Load with asm.js. - promise = THREE.DRACOLoader._loadScript( path + 'draco_decoder.js' ); - } else { - // Load with WebAssembly. - config.wasmBinaryFile = path + 'draco_decoder.wasm'; - promise = THREE.DRACOLoader._loadScript( path + 'draco_wasm_wrapper.js' ) - .then( function () { - return THREE.DRACOLoader._loadArrayBuffer( config.wasmBinaryFile ); - } ) - .then( function ( wasmBinary ) { - config.wasmBinary = wasmBinary; - } ); - } - - // Wait for source files, then create and return a decoder. - promise = promise.then( function () { - return new Promise( function ( resolve ) { - config.onModuleLoaded = function ( decoder ) { - scope.timeLoaded = performance.now(); - // Module is Promise-like. Wrap before resolving to avoid loop. 
- resolve( { decoder: decoder } ); - }; - DracoDecoderModule( config ); - } ); - } ); - - THREE.DRACOLoader.decoderModulePromise = promise; - return promise; -}; - -/** - * @param {string} src - * @return {Promise} - */ -THREE.DRACOLoader._loadScript = function ( src ) { - var prevScript = document.getElementById( 'decoder_script' ); - if ( prevScript !== null ) { - prevScript.parentNode.removeChild( prevScript ); - } - var head = document.getElementsByTagName( 'head' )[ 0 ]; - var script = document.createElement( 'script' ); - script.id = 'decoder_script'; - script.type = 'text/javascript'; - script.src = src; - return new Promise( function ( resolve ) { - script.onload = resolve; - head.appendChild( script ); - }); -}; - -/** - * @param {string} src - * @return {Promise} - */ -THREE.DRACOLoader._loadArrayBuffer = function ( src ) { - var loader = new THREE.FileLoader(); - loader.setResponseType( 'arraybuffer' ); - return new Promise( function( resolve, reject ) { - loader.load( src, resolve, undefined, reject ); - }); -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshStandardMaterial.js b/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshStandardMaterial.js deleted file mode 100644 index cbbe32dc34d1cfb4a2e6bcf102c708af2978e50e..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/materials/MeshStandardMaterial.js +++ /dev/null @@ -1,180 +0,0 @@ -import { TangentSpaceNormalMap } from '../constants.js'; -import { Material } from './Material.js'; -import { Vector2 } from '../math/Vector2.js'; -import { Color } from '../math/Color.js'; - -/** - * @author WestLangley / http://github.com/WestLangley - * - * parameters = { - * color: , - * roughness: , - * metalness: , - * opacity: , - * - * map: new THREE.Texture( ), - * - * lightMap: new THREE.Texture( ), - * lightMapIntensity: - * - * aoMap: new THREE.Texture( ), - * aoMapIntensity: - * - * emissive: , - * emissiveIntensity: - * emissiveMap: new THREE.Texture( ), - * - * bumpMap: new THREE.Texture( ), - * bumpScale: , - * - * normalMap: new THREE.Texture( ), - * normalMapType: THREE.TangentSpaceNormalMap, - * normalScale: , - * - * displacementMap: new THREE.Texture( ), - * displacementScale: , - * displacementBias: , - * - * roughnessMap: new THREE.Texture( ), - * - * metalnessMap: new THREE.Texture( ), - * - * alphaMap: new THREE.Texture( ), - * - * envMap: new THREE.CubeTexture( [posx, negx, posy, negy, posz, negz] ), - * envMapIntensity: - * - * refractionRatio: , - * - * wireframe: , - * wireframeLinewidth: , - * - * skinning: , - * morphTargets: , - * morphNormals: - * } - */ - -function MeshStandardMaterial( parameters ) { - - Material.call( this ); - - this.defines = { 'STANDARD': '' }; - - this.type = 'MeshStandardMaterial'; - - this.color = new Color( 0xffffff ); // diffuse - this.roughness = 0.5; - this.metalness = 0.5; - - this.map = null; - - this.lightMap = null; - this.lightMapIntensity = 1.0; - - this.aoMap = null; - this.aoMapIntensity = 1.0; - - this.emissive = new Color( 0x000000 ); - this.emissiveIntensity = 1.0; - this.emissiveMap = null; - - this.bumpMap = null; - this.bumpScale = 1; - - this.normalMap = null; - this.normalMapType = TangentSpaceNormalMap; - this.normalScale = new Vector2( 1, 1 ); - - this.displacementMap = null; - this.displacementScale = 1; - this.displacementBias = 0; - - this.roughnessMap = null; - - this.metalnessMap = null; - - this.alphaMap = null; - - this.envMap = null; - this.envMapIntensity = 1.0; - 
- this.refractionRatio = 0.98; - - this.wireframe = false; - this.wireframeLinewidth = 1; - this.wireframeLinecap = 'round'; - this.wireframeLinejoin = 'round'; - - this.skinning = false; - this.morphTargets = false; - this.morphNormals = false; - - this.setValues( parameters ); - -} - -MeshStandardMaterial.prototype = Object.create( Material.prototype ); -MeshStandardMaterial.prototype.constructor = MeshStandardMaterial; - -MeshStandardMaterial.prototype.isMeshStandardMaterial = true; - -MeshStandardMaterial.prototype.copy = function ( source ) { - - Material.prototype.copy.call( this, source ); - - this.defines = { 'STANDARD': '' }; - - this.color.copy( source.color ); - this.roughness = source.roughness; - this.metalness = source.metalness; - - this.map = source.map; - - this.lightMap = source.lightMap; - this.lightMapIntensity = source.lightMapIntensity; - - this.aoMap = source.aoMap; - this.aoMapIntensity = source.aoMapIntensity; - - this.emissive.copy( source.emissive ); - this.emissiveMap = source.emissiveMap; - this.emissiveIntensity = source.emissiveIntensity; - - this.bumpMap = source.bumpMap; - this.bumpScale = source.bumpScale; - - this.normalMap = source.normalMap; - this.normalMapType = source.normalMapType; - this.normalScale.copy( source.normalScale ); - - this.displacementMap = source.displacementMap; - this.displacementScale = source.displacementScale; - this.displacementBias = source.displacementBias; - - this.roughnessMap = source.roughnessMap; - - this.metalnessMap = source.metalnessMap; - - this.alphaMap = source.alphaMap; - - this.envMap = source.envMap; - this.envMapIntensity = source.envMapIntensity; - - this.refractionRatio = source.refractionRatio; - - this.wireframe = source.wireframe; - this.wireframeLinewidth = source.wireframeLinewidth; - this.wireframeLinecap = source.wireframeLinecap; - this.wireframeLinejoin = source.wireframeLinejoin; - - this.skinning = source.skinning; - this.morphTargets = source.morphTargets; - this.morphNormals = source.morphNormals; - - return this; - -}; - - -export { MeshStandardMaterial }; diff --git a/spaces/bassazayda/Whisper/app.py b/spaces/bassazayda/Whisper/app.py deleted file mode 100644 index 04064482f9f6e84c6bf3d8a7095a34399655764c..0000000000000000000000000000000000000000 --- a/spaces/bassazayda/Whisper/app.py +++ /dev/null @@ -1,27 +0,0 @@ -import gradio as gr -import whisper -from langcodes import * - -def speech_to_text(tmp_filename, uploaded, model_size): - model = whisper.load_model(model_size) - source = uploaded if uploaded is not None else tmp_filename - result = model.transcribe(source) - return f'Detected language: {Language.make(language=result["language"]).display_name()}\n\n You said: {result["text"]}' - - -gr.Interface( - - title="Whisper by OpenAI", - thumbnail="https://cdn.openai.com/whisper/asr-summary-of-model-architecture-desktop.svg", - css=""" - .gr-prose p{text-align: center;} - .gr-button {background: black;color: white} - """, - description="Whisper is an automatic speech recognition (ASR) system trained on 680,000 hours of multilingual and multitask supervised data collected from the web.", - fn=speech_to_text, - inputs=[ - gr.Audio(label="Record your voice on your mic",source="microphone", type="filepath"), - gr.Audio(source="upload", type="filepath", label="Upload Audio"), - gr.Dropdown(label="Select model size",value="base",choices=["tiny", "base", "small", "medium", "large"])], - outputs="text").launch() - diff --git a/spaces/baulab/Erasing-Concepts-In-Diffusion/train.py 
b/spaces/baulab/Erasing-Concepts-In-Diffusion/train.py deleted file mode 100644 index 98fbef04d62f1b5a84ea74e3b0ff99e2df9e973f..0000000000000000000000000000000000000000 --- a/spaces/baulab/Erasing-Concepts-In-Diffusion/train.py +++ /dev/null @@ -1,91 +0,0 @@ -from StableDiffuser import StableDiffuser -from finetuning import FineTunedModel -import torch -from tqdm import tqdm - -def train(prompt, modules, freeze_modules, iterations, negative_guidance, lr, save_path): - - nsteps = 50 - - diffuser = StableDiffuser(scheduler='DDIM').to('cuda') - diffuser.train() - - finetuner = FineTunedModel(diffuser, modules, frozen_modules=freeze_modules) - - optimizer = torch.optim.Adam(finetuner.parameters(), lr=lr) - criteria = torch.nn.MSELoss() - - pbar = tqdm(range(iterations)) - - with torch.no_grad(): - - neutral_text_embeddings = diffuser.get_text_embeddings([''],n_imgs=1) - positive_text_embeddings = diffuser.get_text_embeddings([prompt],n_imgs=1) - - del diffuser.vae - del diffuser.text_encoder - del diffuser.tokenizer - - torch.cuda.empty_cache() - - for i in pbar: - - with torch.no_grad(): - - diffuser.set_scheduler_timesteps(nsteps) - - optimizer.zero_grad() - - iteration = torch.randint(1, nsteps - 1, (1,)).item() - - latents = diffuser.get_initial_latents(1, 512, 1) - - with finetuner: - - latents_steps, _ = diffuser.diffusion( - latents, - positive_text_embeddings, - start_iteration=0, - end_iteration=iteration, - guidance_scale=3, - show_progress=False - ) - - diffuser.set_scheduler_timesteps(1000) - - iteration = int(iteration / nsteps * 1000) - - positive_latents = diffuser.predict_noise(iteration, latents_steps[0], positive_text_embeddings, guidance_scale=1) - neutral_latents = diffuser.predict_noise(iteration, latents_steps[0], neutral_text_embeddings, guidance_scale=1) - - with finetuner: - negative_latents = diffuser.predict_noise(iteration, latents_steps[0], positive_text_embeddings, guidance_scale=1) - - positive_latents.requires_grad = False - neutral_latents.requires_grad = False - - loss = criteria(negative_latents, neutral_latents - (negative_guidance*(positive_latents - neutral_latents))) #loss = criteria(e_n, e_0) works the best try 5000 epochs - - loss.backward() - optimizer.step() - - torch.save(finetuner.state_dict(), save_path) - - del diffuser, loss, optimizer, finetuner, negative_latents, neutral_latents, positive_latents, latents_steps, latents - - torch.cuda.empty_cache() -if __name__ == '__main__': - - import argparse - - parser = argparse.ArgumentParser() - - parser.add_argument('--prompt', required=True) - parser.add_argument('--modules', required=True) - parser.add_argument('--freeze_modules', nargs='+', required=True) - parser.add_argument('--save_path', required=True) - parser.add_argument('--iterations', type=int, required=True) - parser.add_argument('--lr', type=float, required=True) - parser.add_argument('--negative_guidance', type=float, required=True) - - train(**vars(parser.parse_args())) \ No newline at end of file diff --git a/spaces/befozg/stylematte/app.py b/spaces/befozg/stylematte/app.py deleted file mode 100644 index 70cd18a2226446c09463b99511c2392040b50846..0000000000000000000000000000000000000000 --- a/spaces/befozg/stylematte/app.py +++ /dev/null @@ -1,36 +0,0 @@ -import gradio as gr -from test import inference_img -from models import * -import numpy as np -from PIL import Image - -device='cpu' -model = StyleMatte() -model = model.to(device) -checkpoint = f"stylematte.pth" -state_dict = torch.load(checkpoint, map_location=f'{device}') - 
-model.load_state_dict(state_dict) -model.eval() - -def predict(inp): - print("***********Inference****************") - mask = inference_img(model, inp) - inp_np = np.array(inp) - fg = np.uint8((mask[:,:,None]*inp_np)) - print("***********Inference finish****************") - # print("***********MASK****************", inp_np.max(), mask.max()) - fg_pil = Image.fromarray(fg) - - return [mask, fg_pil] - -print("MODEL LOADED") -print("************************************") - -iface = gr.Interface(fn=predict, - inputs=gr.Image(type="numpy"), - outputs=[gr.Image(type="numpy"),gr.Image(type="pil", image_mode='RGBA')], - examples=["./logo.jpeg"]) -print("****************Interface created******************") - -iface.launch() \ No newline at end of file diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327010010.py b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327010010.py deleted file mode 100644 index a349a305ffad302adaf14d0cf03ce569a4819766..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327010010.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -#os.system("pip install gfpgan") - -#os.system("pip freeze") -#os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .") -import random -import gradio as gr -from PIL import Image -import torch -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 'Henry.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg') - - - - -import cv2 -import glob -import numpy as np -from basicsr.utils import imwrite -from gfpgan import GFPGANer - -import warnings -warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. ' - 'If you really want to use it, please modify the corresponding codes.') -bg_upsampler = None - - - -# set up GFPGAN restorer -restorer = GFPGANer( - model_path='experiments/pretrained_models/GFPGANv1.3.pth', - upscale=2, - arch='clean', - channel_multiplier=2, - bg_upsampler=bg_upsampler) - - -def inference(img): - input_img = cv2.imread(img, cv2.IMREAD_COLOR) - cropped_faces, restored_faces, restored_img = restorer.enhance( - input_img, has_aligned=False, only_center_face=False, paste_back=True) - - return Image.fromarray(restored_img[0][:,:,::-1]) - -title = "GFP-GAN" -description = "Gradio demo for GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please click submit only once" -article = "

Towards Real-World Blind Face Restoration with Generative Facial Prior | GitHub Repo

visitor badge
" -gr.Interface( - inference, - [gr.inputs.Image(type="filepath", label="Input")], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['lincoln.jpg'], - ['einstein.png'], - ['edison.jpg'], - ['Henry.jpg'], - ['Frida.jpg'] - ] - ).launch(enable_queue=True,cache_examples=True) - - diff --git a/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite 2017 19.1.0.434 Ml (x86-x64) Serial Key Keygen _BEST_.md b/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite 2017 19.1.0.434 Ml (x86-x64) Serial Key Keygen _BEST_.md deleted file mode 100644 index e84c297d73c595187fc1b8ab1e0231970cf5ff46..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite 2017 19.1.0.434 Ml (x86-x64) Serial Key Keygen _BEST_.md +++ /dev/null @@ -1,6 +0,0 @@ -

CorelDRAW Graphics Suite 2017 19.1.0.434 Ml (x86-x64) Serial Key Keygen


Download Ziphttps://urloso.com/2uyPOd



- -Ultimate 7.8.12 Build 20151119 + Serials +keygen + registry (FULL). ... HitFilm Ultimate 2.0.2522.46168 (64 bit) (crack Figgler) [ChingL Serial Key PATCHED ... CorelDRAW Graphics Suite 2017 19.1.0.434 Ml (x86-x64) 64 bit 1fdad05405
-
-
-

diff --git a/spaces/bioriAsaeru/text-to-voice/EngenhariaDeDrenagemSuperficialPauloSampaioWilkenl NEW.md b/spaces/bioriAsaeru/text-to-voice/EngenhariaDeDrenagemSuperficialPauloSampaioWilkenl NEW.md deleted file mode 100644 index 5a4975d1c23207a06f9d979f14dd3793d5522beb..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/EngenhariaDeDrenagemSuperficialPauloSampaioWilkenl NEW.md +++ /dev/null @@ -1,6 +0,0 @@ -

EngenhariaDeDrenagemSuperficialPauloSampaioWilkenl


Download ->>->>->> https://urloso.com/2uyPp9



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/brainblow/MusiCreator/tests/modules/test_codebooks_patterns.py b/spaces/brainblow/MusiCreator/tests/modules/test_codebooks_patterns.py deleted file mode 100644 index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000 --- a/spaces/brainblow/MusiCreator/tests/modules/test_codebooks_patterns.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.modules.codebooks_patterns import ( - DelayedPatternProvider, - ParallelPatternProvider, - Pattern, - UnrolledPatternProvider, -) - - -class TestParallelPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == s - 1 # account for the 1st empty step - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_max_delay(self, n_q: int, timesteps: int): - provider = ParallelPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == 0 - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestDelayedPatternProvider: - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [0, 1, 16, 100]) - def test_get_pattern(self, n_q: int, timesteps: int): - delays = [ - list(range(n_q)), - [0] + [1] * (n_q - 1), - [0] + [4] * (n_q - 1), - ] - for delay in delays: - provider = DelayedPatternProvider(n_q, delay) - pattern = provider.get_pattern(timesteps) - # + 1 to account for 1st step - assert len(pattern.layout) == timesteps + max(delay) + 1 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - def test_pattern_content(self, n_q: int, timesteps: int): - provider = DelayedPatternProvider(n_q) - pattern = provider.get_pattern(timesteps) - for s, v in enumerate(pattern.layout): - for i, code in enumerate(v): - assert i == code.q - assert code.t == max(0, s - code.q - 1) - - @pytest.mark.parametrize("timesteps", [8, 16, 100]) - @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]]) - def test_pattern_max_delay(self, timesteps: int, delay: list): - provider = DelayedPatternProvider(len(delay), delay) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max(delay) - assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay - - -class TestUnrolledPatternProvider: - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_get_pattern(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = 
UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay - - @pytest.mark.parametrize("timesteps", [0, 1, 16]) - @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]]) - @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]]) - def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list): - n_q = len(flattening) - max_delay = max(delays) - provider = UnrolledPatternProvider(n_q, flattening, delays) - pattern = provider.get_pattern(timesteps) - assert pattern.max_delay == max_delay - - -class TestPattern: - - def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to build the sequence from the pattern without using fancy scatter.""" - bs, n_q, T = z.shape - z = z.cpu().numpy() - assert n_q == pattern.n_q - assert T <= pattern.timesteps - inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < T: - inp[:, q, s] = z[:, q, t] - return torch.from_numpy(inp) - - def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int): - """Reference method to revert the sequence from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, n_q, S = z.shape - assert pattern.n_q == n_q - inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy() - inp[:] = special_token - for s, v in enumerate(pattern.layout): - for (t, q) in v: - if t < pattern.timesteps: - inp[:, q, t] = z[:, q, s] - return torch.from_numpy(inp) - - def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float): - """Reference method to revert the logits from the pattern without using fancy scatter.""" - z = z.cpu().numpy() - bs, card, n_q, S = z.shape - assert pattern.n_q == n_q - ref_layout = pattern.layout - inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy() - inp[:] = special_token - for s, v in enumerate(ref_layout[1:]): - if s < S: - for (t, q) in v: - if t < pattern.timesteps: - inp[:, :, q, t] = z[:, :, q, s] - return torch.from_numpy(inp) - - def _get_pattern_providers(self, n_q: int): - pattern_provider_1 = ParallelPatternProvider(n_q) - pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q))) - pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1)) - pattern_provider_4 = UnrolledPatternProvider( - n_q, flattening=list(range(n_q)), delays=[0] * n_q - ) - pattern_provider_5 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q - ) - pattern_provider_6 = UnrolledPatternProvider( - n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1) - ) - return [ - pattern_provider_1, - pattern_provider_2, - pattern_provider_3, - pattern_provider_4, - pattern_provider_5, - pattern_provider_6, - ] - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_build_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # we can correctly build the sequence from the pattern - z = torch.randint(0, card, (bs, n_q, timesteps)) - ref_res = 
self.ref_build_pattern_sequence(z, pattern, special_token) - res, indexes, mask = pattern.build_pattern_sequence(z, special_token) - assert (res == ref_res).float().mean() == 1.0 - - # expected assertion fails on the number of timesteps - invalid_timesteps = [timesteps + 1] - if pattern.num_sequence_steps != pattern.timesteps: - invalid_timesteps.append(pattern.num_sequence_steps) - for i_timesteps in invalid_timesteps: - z2 = torch.randint(0, card, (bs, n_q, i_timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z2, special_token) - - # expected assertion fails on the number of codebooks - invalid_qs = [0, n_q - 1, n_q + 1] - for i_q in invalid_qs: - z3 = torch.randint(0, card, (bs, i_q, timesteps)) - with pytest.raises(AssertionError): - pattern.build_pattern_sequence(z3, special_token) - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - def test_revert_pattern_sequence(self, n_q: int, timesteps: int): - bs = 2 - card = 256 - special_token = card - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token) - # ensure our reference script retrieve the original sequence - assert z.shape == ref_out.shape - assert (z == ref_out).float().mean() == 1.0 - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_sequence(s, special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 - - @pytest.mark.parametrize("n_q", [1, 4, 32]) - @pytest.mark.parametrize("timesteps", [16, 72]) - @pytest.mark.parametrize("card", [1, 2, 256, 1024]) - def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int): - bs = 2 - special_token = card - logits_special_token = float('nan') - - pattern_providers = self._get_pattern_providers(n_q) - for pattern_provider in pattern_providers: - pattern = pattern_provider.get_pattern(timesteps) - # this works assuming previous tests are successful - z = torch.randint(0, card, (bs, n_q, timesteps)) - s = self.ref_build_pattern_sequence(z, pattern, special_token) - logits = torch.randn((bs, card, n_q, s.shape[-1])) - ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token) - # ensure our reference script retrieve the original sequence - assert ref_out.shape == torch.Size([bs, card, n_q, timesteps]) - # now we can test the scatter version - out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token) - assert out.shape == ref_out.shape - assert (out == ref_out).float().mean() == 1.0 diff --git a/spaces/brainblow/beat_remixer/beat_manipulator/effects.py b/spaces/brainblow/beat_remixer/beat_manipulator/effects.py deleted file mode 100644 index 19ee796cf015987b28ee60d5931089be6a533178..0000000000000000000000000000000000000000 --- a/spaces/brainblow/beat_remixer/beat_manipulator/effects.py +++ /dev/null @@ -1,84 +0,0 @@ -import numpy as np -from . 
import io - -def deco_abs(effect): - def stuff(*args, **kwargs): - if len(args)>0: audio = args[0] - else: audio = kwargs['audio'] - if not isinstance(audio, np.ndarray): audio = io._load(audio) - audio_signs = np.sign(audio) - audio = np.abs(audio) - if len(args)>0: args[0] = audio - else: kwargs['audio'] = audio - audio = effect(*args, **kwargs) - audio *= audio_signs - return stuff - - - -def volume(audio: np.ndarray, v: float): - return audio*v - -def speed(audio: np.ndarray, s: float = 2, precision:int = 24): - if s%1 != 0 and (1/s)%1 != 0: - import fractions - s = fractions.Fraction(s).limit_denominator(precision) - audio = np.repeat(audio, s.denominator, axis=1) - return audio[:,::s.numerator] - elif s%1 == 0: - return audio[:,::int(s)] - else: - return np.repeat(audio, int(1/s), axis=1) - -def channel(audio: np.ndarray, c:int = None): - if c is None: - audio[0], audio[1] = audio[1], audio[0] - return audio - elif c == 0: - audio[0] = 0 - return audio - else: - audio[1] = 0 - return audio - -def downsample(audio: np.ndarray, d:int = 10): - return np.repeat(audio[:,::d], d, axis=1) - -def gradient(audio: np.ndarray, number: int = 1): - for _ in range(number): - audio = np.gradient(audio, axis=1) - return audio - -def bitcrush(audio: np.ndarray, b:float = 4): - if 1/b > 1: - return np.around(audio, decimals=int(1/b)) - else: - return np.around(audio*b, decimals = 1) - -def reverse(audio: np.ndarray): - return audio[:,::-1] - -def normalize(audio: np.ndarray): - return audio*(1/np.max(np.abs(audio))) - -def clip(audio: np.ndarray): - return np.clip(audio, -1, 1) - -def to_sidechain(audio: np.ndarray): - audio = np.clip(np.abs(audio), -1, 1) - for channel in range(len(audio)): - audio[channel] = np.abs(1 - np.convolve(audio[channel], np.ones(shape=(1000)), mode = 'same')) - return audio - - - -# some stuff is defined in main.py to reduce function calls for 1 line stuff -BM_EFFECTS = { - "v": "volume", - "s": speed, - "c": channel, - "d": "downsample", - "g": "gradient", - "b": bitcrush, - "r": "reverse", -} \ No newline at end of file diff --git a/spaces/bunnyg20081061/world2/app.py b/spaces/bunnyg20081061/world2/app.py deleted file mode 100644 index afbf4b2426ffb5b89ebf1efb371959d5a8a00088..0000000000000000000000000000000000000000 --- a/spaces/bunnyg20081061/world2/app.py +++ /dev/null @@ -1,2 +0,0 @@ -import gradio as gr -gr.Interface.load("huggingface/gp2").launch() \ No newline at end of file diff --git a/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op/conv2d_gradfix.py b/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op/conv2d_gradfix.py deleted file mode 100644 index 5e4b83adac8e6a4b1caf522596666e4f5d0ee854..0000000000000000000000000000000000000000 --- a/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op/conv2d_gradfix.py +++ /dev/null @@ -1,227 +0,0 @@ -import contextlib -import warnings - -import torch -from torch import autograd -from torch.nn import functional as F - -enabled = True -weight_gradients_disabled = False - - -@contextlib.contextmanager -def no_weight_gradients(): - global weight_gradients_disabled - - old = weight_gradients_disabled - weight_gradients_disabled = True - yield - weight_gradients_disabled = old - - -def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): - if could_use_op(input): - return conv2d_gradfix( - transpose=False, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=0, - dilation=dilation, - groups=groups, - ).apply(input, weight, bias) - - return F.conv2d( - input=input, - 
weight=weight, - bias=bias, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - ) - - -def conv_transpose2d( - input, - weight, - bias=None, - stride=1, - padding=0, - output_padding=0, - groups=1, - dilation=1, -): - if could_use_op(input): - return conv2d_gradfix( - transpose=True, - weight_shape=weight.shape, - stride=stride, - padding=padding, - output_padding=output_padding, - groups=groups, - dilation=dilation, - ).apply(input, weight, bias) - - return F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - stride=stride, - padding=padding, - output_padding=output_padding, - dilation=dilation, - groups=groups, - ) - - -def could_use_op(input): - if (not enabled) or (not torch.backends.cudnn.enabled): - return False - - if input.device.type != "cuda": - return False - - if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]): - return True - - #warnings.warn( - # f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()." - #) - - return False - - -def ensure_tuple(xs, ndim): - xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim - - return xs - - -conv2d_gradfix_cache = dict() - - -def conv2d_gradfix( - transpose, weight_shape, stride, padding, output_padding, dilation, groups -): - ndim = 2 - weight_shape = tuple(weight_shape) - stride = ensure_tuple(stride, ndim) - padding = ensure_tuple(padding, ndim) - output_padding = ensure_tuple(output_padding, ndim) - dilation = ensure_tuple(dilation, ndim) - - key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups) - if key in conv2d_gradfix_cache: - return conv2d_gradfix_cache[key] - - common_kwargs = dict( - stride=stride, padding=padding, dilation=dilation, groups=groups - ) - - def calc_output_padding(input_shape, output_shape): - if transpose: - return [0, 0] - - return [ - input_shape[i + 2] - - (output_shape[i + 2] - 1) * stride[i] - - (1 - 2 * padding[i]) - - dilation[i] * (weight_shape[i + 2] - 1) - for i in range(ndim) - ] - - class Conv2d(autograd.Function): - @staticmethod - def forward(ctx, input, weight, bias): - if not transpose: - out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs) - - else: - out = F.conv_transpose2d( - input=input, - weight=weight, - bias=bias, - output_padding=output_padding, - **common_kwargs, - ) - - ctx.save_for_backward(input, weight) - - return out - - @staticmethod - def backward(ctx, grad_output): - input, weight = ctx.saved_tensors - grad_input, grad_weight, grad_bias = None, None, None - - if ctx.needs_input_grad[0]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, weight, None) - - if ctx.needs_input_grad[1] and not weight_gradients_disabled: - grad_weight = Conv2dGradWeight.apply(grad_output, input) - - if ctx.needs_input_grad[2]: - grad_bias = grad_output.sum((0, 2, 3)) - - return grad_input, grad_weight, grad_bias - - class Conv2dGradWeight(autograd.Function): - @staticmethod - def forward(ctx, grad_output, input): - op = torch._C._jit_get_operation( - "aten::cudnn_convolution_backward_weight" - if not transpose - else "aten::cudnn_convolution_transpose_backward_weight" - ) - flags = [ - torch.backends.cudnn.benchmark, - torch.backends.cudnn.deterministic, - torch.backends.cudnn.allow_tf32, - ] - grad_weight = op( - weight_shape, - grad_output, - input, - 
padding, - stride, - dilation, - groups, - *flags, - ) - ctx.save_for_backward(grad_output, input) - - return grad_weight - - @staticmethod - def backward(ctx, grad_grad_weight): - grad_output, input = ctx.saved_tensors - grad_grad_output, grad_grad_input = None, None - - if ctx.needs_input_grad[0]: - grad_grad_output = Conv2d.apply(input, grad_grad_weight, None) - - if ctx.needs_input_grad[1]: - p = calc_output_padding( - input_shape=input.shape, output_shape=grad_output.shape - ) - grad_grad_input = conv2d_gradfix( - transpose=(not transpose), - weight_shape=weight_shape, - output_padding=p, - **common_kwargs, - ).apply(grad_output, grad_grad_weight, None) - - return grad_grad_output, grad_grad_input - - conv2d_gradfix_cache[key] = Conv2d - - return Conv2d diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/evaluation/sem_seg_evaluation.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/evaluation/sem_seg_evaluation.py deleted file mode 100644 index 3735de62761bd6be4444250dcd4a83239666af1f..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/evaluation/sem_seg_evaluation.py +++ /dev/null @@ -1,265 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import itertools -import json -import logging -import numpy as np -import os -from collections import OrderedDict -from typing import Optional, Union -import pycocotools.mask as mask_util -import torch -from PIL import Image - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.utils.comm import all_gather, is_main_process, synchronize -from detectron2.utils.file_io import PathManager - -from .evaluator import DatasetEvaluator - -_CV2_IMPORTED = True -try: - import cv2 # noqa -except ImportError: - # OpenCV is an optional dependency at the moment - _CV2_IMPORTED = False - - -def load_image_into_numpy_array( - filename: str, - copy: bool = False, - dtype: Optional[Union[np.dtype, str]] = None, -) -> np.ndarray: - with PathManager.open(filename, "rb") as f: - array = np.array(Image.open(f), copy=copy, dtype=dtype) - return array - - -class SemSegEvaluator(DatasetEvaluator): - """ - Evaluate semantic segmentation metrics. - """ - - def __init__( - self, - dataset_name, - distributed=True, - output_dir=None, - *, - sem_seg_loading_fn=load_image_into_numpy_array, - num_classes=None, - ignore_label=None, - ): - """ - Args: - dataset_name (str): name of the dataset to be evaluated. - distributed (bool): if True, will collect results from all ranks for evaluation. - Otherwise, will evaluate the results in the current process. - output_dir (str): an output directory to dump results. - sem_seg_loading_fn: function to read sem seg file and load into numpy array. - Default provided, but projects can customize. - num_classes, ignore_label: deprecated argument - """ - self._logger = logging.getLogger(__name__) - if num_classes is not None: - self._logger.warn( - "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata." - ) - if ignore_label is not None: - self._logger.warn( - "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata." 
- ) - self._dataset_name = dataset_name - self._distributed = distributed - self._output_dir = output_dir - - self._cpu_device = torch.device("cpu") - - self.input_file_to_gt_file = { - dataset_record["file_name"]: dataset_record["sem_seg_file_name"] - for dataset_record in DatasetCatalog.get(dataset_name) - } - - meta = MetadataCatalog.get(dataset_name) - # Dict that maps contiguous training ids to COCO category ids - try: - c2d = meta.stuff_dataset_id_to_contiguous_id - self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} - except AttributeError: - self._contiguous_id_to_dataset_id = None - self._class_names = meta.stuff_classes - self.sem_seg_loading_fn = sem_seg_loading_fn - self._num_classes = len(meta.stuff_classes) - if num_classes is not None: - assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}" - self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label - - # This is because cv2.erode did not work for int datatype. Only works for uint8. - self._compute_boundary_iou = True - if not _CV2_IMPORTED: - self._compute_boundary_iou = False - self._logger.warn( - """Boundary IoU calculation requires OpenCV. B-IoU metrics are - not going to be computed because OpenCV is not available to import.""" - ) - if self._num_classes >= np.iinfo(np.uint8).max: - self._compute_boundary_iou = False - self._logger.warn( - f"""SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation! - B-IoU metrics are not going to be computed. Max allowed value (exclusive) - for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}. - The number of classes of dataset {self._dataset_name} is {self._num_classes}""" - ) - - def reset(self): - self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64) - self._b_conf_matrix = np.zeros( - (self._num_classes + 1, self._num_classes + 1), dtype=np.int64 - ) - self._predictions = [] - - def process(self, inputs, outputs): - """ - Args: - inputs: the inputs to a model. - It is a list of dicts. Each dict corresponds to an image and - contains keys like "height", "width", "file_name". - outputs: the outputs of a model. It is either list of semantic segmentation predictions - (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic - segmentation prediction in the same format. 
- """ - for input, output in zip(inputs, outputs): - output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) - pred = np.array(output, dtype=np.int) - gt_filename = self.input_file_to_gt_file[input["file_name"]] - gt = self.sem_seg_loading_fn(gt_filename, dtype=np.int) - - gt[gt == self._ignore_label] = self._num_classes - - self._conf_matrix += np.bincount( - (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), - minlength=self._conf_matrix.size, - ).reshape(self._conf_matrix.shape) - - if self._compute_boundary_iou: - b_gt = self._mask_to_boundary(gt.astype(np.uint8)) - b_pred = self._mask_to_boundary(pred.astype(np.uint8)) - - self._b_conf_matrix += np.bincount( - (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1), - minlength=self._conf_matrix.size, - ).reshape(self._conf_matrix.shape) - - self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) - - def evaluate(self): - """ - Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): - - * Mean intersection-over-union averaged across classes (mIoU) - * Frequency Weighted IoU (fwIoU) - * Mean pixel accuracy averaged across classes (mACC) - * Pixel Accuracy (pACC) - """ - if self._distributed: - synchronize() - conf_matrix_list = all_gather(self._conf_matrix) - b_conf_matrix_list = all_gather(self._b_conf_matrix) - self._predictions = all_gather(self._predictions) - self._predictions = list(itertools.chain(*self._predictions)) - if not is_main_process(): - return - - self._conf_matrix = np.zeros_like(self._conf_matrix) - for conf_matrix in conf_matrix_list: - self._conf_matrix += conf_matrix - - self._b_conf_matrix = np.zeros_like(self._b_conf_matrix) - for b_conf_matrix in b_conf_matrix_list: - self._b_conf_matrix += b_conf_matrix - - if self._output_dir: - PathManager.mkdirs(self._output_dir) - file_path = os.path.join(self._output_dir, "sem_seg_predictions.json") - with PathManager.open(file_path, "w") as f: - f.write(json.dumps(self._predictions)) - - acc = np.full(self._num_classes, np.nan, dtype=np.float) - iou = np.full(self._num_classes, np.nan, dtype=np.float) - tp = self._conf_matrix.diagonal()[:-1].astype(np.float) - pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) - class_weights = pos_gt / np.sum(pos_gt) - pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) - acc_valid = pos_gt > 0 - acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] - union = pos_gt + pos_pred - tp - iou_valid = np.logical_and(acc_valid, union > 0) - iou[iou_valid] = tp[iou_valid] / union[iou_valid] - macc = np.sum(acc[acc_valid]) / np.sum(acc_valid) - miou = np.sum(iou[iou_valid]) / np.sum(iou_valid) - fiou = np.sum(iou[iou_valid] * class_weights[iou_valid]) - pacc = np.sum(tp) / np.sum(pos_gt) - - if self._compute_boundary_iou: - b_iou = np.full(self._num_classes, np.nan, dtype=np.float) - b_tp = self._b_conf_matrix.diagonal()[:-1].astype(np.float) - b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(np.float) - b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(np.float) - b_union = b_pos_gt + b_pos_pred - b_tp - b_iou_valid = b_union > 0 - b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid] - - res = {} - res["mIoU"] = 100 * miou - res["fwIoU"] = 100 * fiou - for i, name in enumerate(self._class_names): - res[f"IoU-{name}"] = 100 * iou[i] - if self._compute_boundary_iou: - res[f"BoundaryIoU-{name}"] = 100 * b_iou[i] - res[f"min(IoU, B-Iou)-{name}"] = 100 * min(iou[i], b_iou[i]) - res["mACC"] = 100 * 
macc - res["pACC"] = 100 * pacc - for i, name in enumerate(self._class_names): - res[f"ACC-{name}"] = 100 * acc[i] - - if self._output_dir: - file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth") - with PathManager.open(file_path, "wb") as f: - torch.save(res, f) - results = OrderedDict({"sem_seg": res}) - self._logger.info(results) - return results - - def encode_json_sem_seg(self, sem_seg, input_file_name): - """ - Convert semantic segmentation to COCO stuff format with segments encoded as RLEs. - See http://cocodataset.org/#format-results - """ - json_list = [] - for label in np.unique(sem_seg): - if self._contiguous_id_to_dataset_id is not None: - assert ( - label in self._contiguous_id_to_dataset_id - ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name) - dataset_id = self._contiguous_id_to_dataset_id[label] - else: - dataset_id = int(label) - mask = (sem_seg == label).astype(np.uint8) - mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0] - mask_rle["counts"] = mask_rle["counts"].decode("utf-8") - json_list.append( - {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle} - ) - return json_list - - def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02): - assert mask.ndim == 2, "mask_to_boundary expects a 2-dimensional image" - h, w = mask.shape - diag_len = np.sqrt(h**2 + w**2) - dilation = max(1, int(round(dilation_ratio * diag_len))) - kernel = np.ones((3, 3), dtype=np.uint8) - - padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0) - eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation) - eroded_mask = eroded_mask_with_padding[1:-1, 1:-1] - boundary = mask - eroded_mask - return boundary diff --git a/spaces/cc1799/vits-uma-genshin-honkai/text/symbols.py b/spaces/cc1799/vits-uma-genshin-honkai/text/symbols.py deleted file mode 100644 index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000 --- a/spaces/cc1799/vits-uma-genshin-honkai/text/symbols.py +++ /dev/null @@ -1,39 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. 
-''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' - - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") \ No newline at end of file diff --git a/spaces/cffl/Exploring_Intelligent_Writing_Assistance/README.md b/spaces/cffl/Exploring_Intelligent_Writing_Assistance/README.md deleted file mode 100644 index 13ea26bcc66f56a94cea4e0afcf8cb3410b17731..0000000000000000000000000000000000000000 --- a/spaces/cffl/Exploring_Intelligent_Writing_Assistance/README.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: Text Style Transfer -emoji: 📕 🔀 📘 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.10.0 -python_version: 3.9.7 -app_file: apps/app.py -models: - [ - "sentence-transformers/all-MiniLM-L6-v2", - "cffl/bert-base-styleclassification-subjective-neutral", - "cffl/bart-base-styletransfer-subjective-to-neutral", - "cointegrated/roberta-base-formality", - "prithivida/informal_to_formal_styletransfer", - ] -pinned: true -license: apache-2.0 ---- - -# Exploring Intelligent Writing Assistance - -A demonstration of how the NLP task of _text style transfer_ can be applied to enhance the human writing experience using [HuggingFace Transformers](https://huggingface.co/) and [Streamlit](https://streamlit.io/). - -![](static/images/app_screenshot.png) - -> This repo accompanies Cloudera Fast Forward Labs' [blog series](https://blog.fastforwardlabs.com/2022/03/22/an-introduction-to-text-style-transfer.html) in which we explore the task of automatically neutralizing subjectivity bias in free text. - -The goal of this application is to demonstrate how the NLP task of text style transfer can be applied to enhance the human writing experience. In this sense, we intend to peel back the curtains on how an intelligent writing assistant might function — walking through the logical steps needed to automatically re-style a piece of text (from informal-to-formal **or** subjective-to-neutral) while building up confidence in the model output. - -Through the application, we emphasize the imperative for a human-in-the-loop user experience when designing natural language generation systems. We believe text style transfer has the potential to empower writers to better express themselves, but not by blindly generating text. Rather, generative models, in conjunction with interpretability methods, should be combined to help writers understand the nuances of linguistic style and suggest stylistic edits that may improve their writing. - -## Project Structure - -``` -. 
-├── LICENSE -├── README.md -├── apps -│ ├── app.py -│ ├── app_utils.py -│ ├── data_utils.py -│ └── visualization_utils.py -├── requirements.txt -├── scripts -│ ├── download_models.py -│ ├── install_dependencies.py -│ └── launch_app.py -├── setup.py -├── src -│ ├── __init__.py -│ ├── content_preservation.py -│ ├── style_classification.py -│ ├── style_transfer.py -│ └── transformer_interpretability.py -├── static -│ └── images -└── tests - ├── __init__.py - └── test_model_classes.py -``` diff --git a/spaces/chansung/textual-inversion-pipeline/app.py b/spaces/chansung/textual-inversion-pipeline/app.py deleted file mode 100644 index c626f09c8fa683fdc69f3c5d6a1dcf8f8458102a..0000000000000000000000000000000000000000 --- a/spaces/chansung/textual-inversion-pipeline/app.py +++ /dev/null @@ -1,444 +0,0 @@ -""" -Adapted from https://huggingface.co/spaces/stabilityai/stable-diffusion -""" - -from tensorflow import keras - -import time - -import gradio as gr -import keras_cv - -from constants import css, examples, img_height, img_width, num_images_to_gen -from share_btn import community_icon_html, loading_icon_html, share_js - -from huggingface_hub import from_pretrained_keras -from huggingface_hub import Repository - -import json -import requests - -# MODEL_CKPT = "chansung/textual-inversion-pipeline@v1673026791" -# MODEL = from_pretrained_keras(MODEL_CKPT) - -# model = keras_cv.models.StableDiffusion( -# img_width=img_width, img_height=img_height, jit_compile=True -# ) -# model._text_encoder = MODEL -# model._text_encoder.compile(jit_compile=True) - -# # Warm-up the model. -# _ = model.text_to_image("Teddy bear", batch_size=num_images_to_gen) - -head_sha = "398e79c789669981a2ab1da1fbdafc3998c7b08a" - -def generate_image_fn(prompt: str, unconditional_guidance_scale: int) -> list: - start_time = time.time() - # `images is an `np.ndarray`. So we convert it to a list of ndarrays. - # Each ndarray represents a generated image. 
- # Reference: https://gradio.app/docs/#gallery - images = model.text_to_image( - prompt, - batch_size=num_images_to_gen, - unconditional_guidance_scale=unconditional_guidance_scale, - ) - end_time = time.time() - print(f"Time taken: {end_time - start_time} seconds.") - return [image for image in images] - -demoInterface = gr.Interface( - generate_image_fn, - inputs=[ - gr.Textbox( - label="Enter your prompt", - max_lines=1, -# placeholder="cute Sundar Pichai creature", - ), - gr.Slider(value=40, minimum=8, maximum=50, step=1), - ], - outputs=gr.Gallery().style(grid=[2], height="auto"), - # examples=[["cute Sundar Pichai creature", 8], ["Hello kitty", 8]], - allow_flagging=False, -) - -def avaliable_providers(): - providers = [] - - headers = { - "Content-Type": "application/json", - } - endpoint_url = "https://api.endpoints.huggingface.cloud/provider" - response = requests.get(endpoint_url, headers=headers) - - for provider in response.json()['items']: - if provider['status'] == 'available': - providers.append(provider['vendor']) - - return providers - - -with gr.Blocks() as demo: - gr.Markdown( - """ - # Your own Stable Diffusion on Google Cloud Platform - """) - - with gr.Row(): - gcp_project_id = gr.Textbox( - label="GCP project ID", - ) - gcp_region = gr.Dropdown( - ["us-central1", "asia‑east1", "asia-northeast1"], - value="us-central1", - interactive=True, - label="GCP Region" - ) - - gr.Markdown( - """ - Configurations on scalability - """) - with gr.Row(): - min_nodes = gr.Slider( - label="minimum number of nodes", - minimum=1, - maximum=10) - - max_nodes = gr.Slider( - label="maximum number of nodes", - minimum=1, - maximum=10) - - btn = gr.Button(value="Ready to Deploy!") - # btn.click(mirror, inputs=[im], outputs=[im_2]) - -def update_regions(provider): - avalialbe_regions = [] - - headers = { - "Content-Type": "application/json", - } - endpoint_url = f"https://api.endpoints.huggingface.cloud/provider/{provider}/region" - response = requests.get(endpoint_url, headers=headers) - - for region in response.json()['items']: - if region['status'] == 'available': - avalialbe_regions.append(f"{region['region']}/{region['label']}") - - return gr.Dropdown.update( - choices=avalialbe_regions, - value=avalialbe_regions[0] if len(avalialbe_regions) > 0 else None - ) - -def update_compute_options(provider, region): - region = region.split("/")[0] - avalialbe_compute_options = [] - - headers = { - "Content-Type": "application/json", - } - endpoint_url = f"https://api.endpoints.huggingface.cloud/provider/{provider}/region/{region}/compute" - print(endpoint_url) - response = requests.get(endpoint_url, headers=headers) - - for compute in response.json()['items']: - if compute['status'] == 'available': - accelerator = compute['accelerator'] - numAccelerators = compute['numAccelerators'] - memoryGb = compute['memoryGb'].replace("Gi", "GB") - architecture = compute['architecture'] - instanceType = compute['instanceType'] - - type = f"{numAccelerators}vCPU {memoryGb} · {architecture}" if accelerator == "cpu" else f"{numAccelerators}x {architecture}" - - avalialbe_compute_options.append( - f"{compute['accelerator'].upper()} [{compute['instanceSize']}] · {type} · {instanceType}" - ) - - return gr.Dropdown.update( - choices=avalialbe_compute_options, - value=avalialbe_compute_options[0] if len(avalialbe_compute_options) > 0 else None - ) - -def submit( - hf_token_input, - endpoint_name_input, - provider_selector, - region_selector, - repository_selector, - task_selector, - framework_selector, - 
compute_selector, - min_node_selector, - max_node_selector, - security_selector -): - compute_resources = compute_selector.split("·") - accelerator = compute_resources[0][:3].strip() - - size_l_index = compute_resources[0].index("[") - 1 - size_r_index = compute_resources[0].index("]") - size = compute_resources[0][size_l_index : size_r_index].strip() - - type = compute_resources[-1].strip() - - payload = { - "accountId": repository_selector.split("/")[0], - "compute": { - "accelerator": accelerator.lower(), - "instanceSize": size[1:], - "instanceType": type, - "scaling": { - "maxReplica": int(max_node_selector), - "minReplica": int(min_node_selector) - } - }, - "model": { - "framework": "custom", - "image": { - "huggingface": {} - }, - "repository": repository_selector.lower(), - "revision": head_sha, - "task": task_selector.lower() - }, - "name": endpoint_name_input.strip(), - "provider": { - "region": region_selector.split("/")[0].lower(), - "vendor": provider_selector.lower() - }, - "type": security_selector.lower() - } - - print(payload) - - payload = json.dumps(payload) - print(payload) - - headers = { - "Authorization": f"Bearer {hf_token_input.strip()}", - "Content-Type": "application/json", - } - endpoint_url = f"https://api.endpoints.huggingface.cloud/endpoint" - print(endpoint_url) - - response = requests.post(endpoint_url, headers=headers, data=payload) - - if response.status_code == 400: - return f"{response.text}. Malformed data in {payload}" - elif response.status_code == 401: - return "Invalid token" - elif response.status_code == 409: - return f"Endpoint {endpoint_name_input} already exists" - elif response.status_code == 202: - return f"Endpoint {endpoint_name_input} created successfully on {provider_selector.lower()} using {repository_selector.lower()}@{head_sha}. \n Please check out the progress at https://ui.endpoints.huggingface.co/endpoints." 
- else: - return f"something went wrong {response.status_code} = {response.text}" - -with gr.Blocks() as demo2: - gr.Markdown( - """ - ## Deploy Stable Diffusion on 🤗 Endpoint - --- - """) - - gr.Markdown(""" - - #### Your 🤗 Access Token - """) - hf_token_input = gr.Textbox( - show_label=False, - type="password" - ) - - gr.Markdown(""" - #### Decide the Endpoint name - """) - endpoint_name_input = gr.Textbox( - show_label=False - ) - - providers = avaliable_providers() - - with gr.Row(): - gr.Markdown(""" - #### Cloud Provider - """) - - gr.Markdown(""" - #### Cloud Region - """) - - with gr.Row(): - provider_selector = gr.Dropdown( - choices=providers, - interactive=True, - show_label=False, - ) - - region_selector = gr.Dropdown( - [], - value="", - interactive=True, - show_label=False, - ) - - provider_selector.change(update_regions, inputs=provider_selector, outputs=region_selector) - - with gr.Row(): - gr.Markdown(""" - #### Target Model - """) - - gr.Markdown(""" - #### Target Model Version(branch) - """) - - with gr.Row(): - repository_selector = gr.Textbox( - value="chansung/my-kitty", - interactive=False, - show_label=False, - ) - - revision_selector = gr.Textbox( - value=f"v1673365013/{head_sha[:7]}", - interactive=False, - show_label=False, - ) - - with gr.Row(): - gr.Markdown(""" - #### Task - """) - - gr.Markdown(""" - #### Framework - """) - - with gr.Row(): - task_selector = gr.Textbox( - value="Custom", - interactive=False, - show_label=False, - ) - - framework_selector = gr.Textbox( - value="TensorFlow", - interactive=False, - show_label=False, - ) - - gr.Markdown(""" - - #### Select Compute Instance Type - """) - compute_selector = gr.Dropdown( - [], - value="", - interactive=True, - show_label=False, - ) - region_selector.change(update_compute_options, inputs=[provider_selector, region_selector], outputs=compute_selector) - - with gr.Row(): - gr.Markdown(""" - #### Min Number of Nodes - """) - - gr.Markdown(""" - #### Max Number of Nodes - """) - - gr.Markdown(""" - #### Security Level - """) - - with gr.Row(): - min_node_selector = gr.Number( - value=1, - interactive=True, - show_label=False, - ) - - max_node_selector = gr.Number( - value=1, - interactive=True, - show_label=False, - ) - - security_selector = gr.Radio( - choices=["Protected", "Public", "Private"], - value="Public", - interactive=True, - show_label=False, - ) - - submit_button = gr.Button( - value="Submit", - ) - - status_txt = gr.Textbox( - value="any status update will be displayed here", - interactive=False - ) - - submit_button.click( - submit, - inputs=[ - hf_token_input, - endpoint_name_input, - provider_selector, - region_selector, - repository_selector, - task_selector, - framework_selector, - compute_selector, - min_node_selector, - max_node_selector, - security_selector], - outputs=status_txt) - - gr.Markdown(""" - #### Pricing Table(CPU) - 2023/1/11 - """) - - gr.Dataframe( - headers=["provider", "size", "$/h", "vCPUs", "Memory", "Architecture"], - datatype=["str", "str", "str", "number", "str", "str"], - row_count=8, - col_count=(6, "fixed"), - value=[ - ["aws", "small", "$0.06", 1, "2GB", "Intel Xeon - Ice Lake"], - ["aws", "medium", "$0.12", 2, "4GB", "Intel Xeon - Ice Lake"], - ["aws", "large", "$0.24", 4, "8GB", "Intel Xeon - Ice Lake"], - ["aws", "xlarge", "$0.48", 8, "16GB", "Intel Xeon - Ice Lake"], - ["azure", "small", "$0.06", 1, "2GB", "Intel Xeon"], - ["azure", "medium", "$0.12", 2, "4GB", "Intel Xeon"], - ["azure", "large", "$0.24", 4, "8GB", "Intel Xeon"], - ["azure", "xlarge", 
"$0.48", 8, "16GB", "Intel Xeon"], - ] - ) - - gr.Markdown(""" - #### Pricing Table(GPU) - 2023/1/11 - """) - - gr.Dataframe( - headers=["provider", "size", "$/h", "GPUs", "Memory", "Architecture"], - datatype=["str", "str", "str", "number", "str", "str"], - row_count=6, - col_count=(6, "fixed"), - value=[ - ["aws", "small", "$0.60", 1, "14GB", "NVIDIA T4"], - ["aws", "medium", "$1.30", 1, "24GB", "NVIDIA A10G"], - ["aws", "large", "$4.50", 4, "156B", "NVIDIA T4"], - ["aws", "xlarge", "$6.50", 1, "80GB", "NVIDIA A100"], - ["aws", "xxlarge", "$7.00", 4, "96GB", "NVIDIA A10G"], - ["aws", "xxxlarge", "$45.0", 8, "640GB", "NVIDIA A100"], - ] - ) - -gr.TabbedInterface( - [demo2], ["Deploy on 🤗 Endpoint"] -).launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/chawiii/open-reverse-proxy/README.md b/spaces/chawiii/open-reverse-proxy/README.md deleted file mode 100644 index e7579ba2ac2b5c6120532fa59964bf5afc22c9ff..0000000000000000000000000000000000000000 --- a/spaces/chawiii/open-reverse-proxy/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Open Reverse Proxy -emoji: 📚 -colorFrom: purple -colorTo: blue -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/commands/convert.py b/spaces/chendl/compositional_test/transformers/src/transformers/commands/convert.py deleted file mode 100644 index b46e14f5a673205eba2aea97f762ba80a50936ee..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/commands/convert.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from argparse import ArgumentParser, Namespace - -from ..utils import logging -from . import BaseTransformersCLICommand - - -def convert_command_factory(args: Namespace): - """ - Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint. - - Returns: ServeCommand - """ - return ConvertCommand( - args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name - ) - - -IMPORT_ERROR_MESSAGE = """ -transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires -TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. 
-""" - - -class ConvertCommand(BaseTransformersCLICommand): - @staticmethod - def register_subcommand(parser: ArgumentParser): - """ - Register this command to argparse so it's available for the transformer-cli - - Args: - parser: Root parser to register command-specific arguments - """ - train_parser = parser.add_parser( - "convert", - help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.", - ) - train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.") - train_parser.add_argument( - "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder." - ) - train_parser.add_argument( - "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output." - ) - train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.") - train_parser.add_argument( - "--finetuning_task_name", - type=str, - default=None, - help="Optional fine-tuning task name if the TF model was a finetuned model.", - ) - train_parser.set_defaults(func=convert_command_factory) - - def __init__( - self, - model_type: str, - tf_checkpoint: str, - pytorch_dump_output: str, - config: str, - finetuning_task_name: str, - *args, - ): - self._logger = logging.get_logger("transformers-cli/converting") - - self._logger.info(f"Loading model {model_type}") - self._model_type = model_type - self._tf_checkpoint = tf_checkpoint - self._pytorch_dump_output = pytorch_dump_output - self._config = config - self._finetuning_task_name = finetuning_task_name - - def run(self): - if self._model_type == "albert": - try: - from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( - convert_tf_checkpoint_to_pytorch, - ) - except ImportError: - raise ImportError(IMPORT_ERROR_MESSAGE) - - convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - elif self._model_type == "bert": - try: - from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( - convert_tf_checkpoint_to_pytorch, - ) - except ImportError: - raise ImportError(IMPORT_ERROR_MESSAGE) - - convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - elif self._model_type == "funnel": - try: - from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( - convert_tf_checkpoint_to_pytorch, - ) - except ImportError: - raise ImportError(IMPORT_ERROR_MESSAGE) - - convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - elif self._model_type == "t5": - try: - from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch - except ImportError: - raise ImportError(IMPORT_ERROR_MESSAGE) - - convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - elif self._model_type == "gpt": - from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( - convert_openai_checkpoint_to_pytorch, - ) - - convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - elif self._model_type == "transfo_xl": - try: - from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( - convert_transfo_xl_checkpoint_to_pytorch, - ) - except ImportError: - raise ImportError(IMPORT_ERROR_MESSAGE) - - if "ckpt" in self._tf_checkpoint.lower(): - TF_CHECKPOINT = self._tf_checkpoint - TF_DATASET_FILE = "" - else: - TF_DATASET_FILE = 
self._tf_checkpoint - TF_CHECKPOINT = "" - convert_transfo_xl_checkpoint_to_pytorch( - TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE - ) - elif self._model_type == "gpt2": - try: - from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import ( - convert_gpt2_checkpoint_to_pytorch, - ) - except ImportError: - raise ImportError(IMPORT_ERROR_MESSAGE) - - convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - elif self._model_type == "xlnet": - try: - from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( - convert_xlnet_checkpoint_to_pytorch, - ) - except ImportError: - raise ImportError(IMPORT_ERROR_MESSAGE) - - convert_xlnet_checkpoint_to_pytorch( - self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name - ) - elif self._model_type == "xlm": - from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( - convert_xlm_checkpoint_to_pytorch, - ) - - convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output) - elif self._model_type == "lxmert": - from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( - convert_lxmert_checkpoint_to_pytorch, - ) - - convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output) - elif self._model_type == "rembert": - from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( - convert_rembert_tf_checkpoint_to_pytorch, - ) - - convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - else: - raise ValueError( - "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" - ) diff --git a/spaces/chilge/Fushimi/app.py b/spaces/chilge/Fushimi/app.py deleted file mode 100644 index 1ea281f018a29105cb4d07973bdfa59038d821af..0000000000000000000000000000000000000000 --- a/spaces/chilge/Fushimi/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import io - -import gradio as gr -import librosa -import numpy as np -import soundfile -import torch -from inference.infer_tool import Svc -import logging - -logging.getLogger('numba').setLevel(logging.WARNING) - -model_name = "logs/32k/gx.pth" -config_name = "configs/config.json" - -svc_model = Svc(model_name, config_name) -sid_map = { - "伏见弓弦": "fushimi" -} - - -def vc_fn(sid, input_audio, vc_transform): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - # print(audio.shape,sampling_rate) - duration = audio.shape[0] / sampling_rate - if duration > 45: - return "请上传小于45s的音频,需要转换长音频请本地进行转换", None - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - print(audio.shape) - out_wav_path = io.BytesIO() - soundfile.write(out_wav_path, audio, 16000, format="wav") - out_wav_path.seek(0) - - sid = sid_map[sid] - out_audio, out_sr = svc_model.infer(sid, vc_transform, out_wav_path) - _audio = out_audio.cpu().numpy() - return "Success", (32000, _audio) - - -app = gr.Blocks() -with app: - with gr.Tabs(): - with gr.TabItem("Basic"): - gr.Markdown(value=""" - - - i7000如果要在本地使用该demo,请使用git lfs clone 该仓库,安装requirements.txt后运行app.py即可 - - 项目改写基于 https://huggingface.co/spaces/innnky/nyaru-svc-3.0 - - 本地合成可以删除26、27两行代码以解除合成45s长度限制""") - sid = gr.Dropdown(label="音色", choices=["伏见弓弦"], value="fushimi") 
- vc_input3 = gr.Audio(label="上传音频(长度小于45秒)") - vc_transform = gr.Number(label="变调(整数,可以正负,半音数量,升高八度就是12)", value=0) - vc_submit = gr.Button("转换", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio") - vc_submit.click(vc_fn, [sid, vc_input3, vc_transform], [vc_output1, vc_output2]) - - app.launch() diff --git a/spaces/chilge/Fushimi/losses.py b/spaces/chilge/Fushimi/losses.py deleted file mode 100644 index 41f9be6980713a46824ae9ec5eb8fd7c515d89c5..0000000000000000000000000000000000000000 --- a/spaces/chilge/Fushimi/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - #print(logs_p) - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/exceptions.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/exceptions.py deleted file mode 100644 index 2d6e1a44b6a1667d1c302869ff2a332634fda47e..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/exceptions.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -fsspec user-defined exception classes -""" -import asyncio - - -class BlocksizeMismatchError(ValueError): - """ - Raised when a cached file is opened with a different blocksize than it was - written with - """ - - ... - - -class FSTimeoutError(asyncio.TimeoutError): - """ - Raised when a fsspec function timed out occurs - """ - - ... diff --git a/spaces/cihyFjudo/fairness-paper-search/Ca Adulte Finder Ami.md b/spaces/cihyFjudo/fairness-paper-search/Ca Adulte Finder Ami.md deleted file mode 100644 index 0722914e5e4a8d13e55f88cc5301ff94a70fda43..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Ca Adulte Finder Ami.md +++ /dev/null @@ -1,6 +0,0 @@ -
-

-
-
\ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Eminemdiscographydownload TOPtorrent.md b/spaces/cihyFjudo/fairness-paper-search/Eminemdiscographydownload TOPtorrent.md deleted file mode 100644 index 91d07a5301b38f1d9e5f6806494e6f9fe6e4712e..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Eminemdiscographydownload TOPtorrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

-
-
-

diff --git a/spaces/cihyFjudo/fairness-paper-search/Mobilisation Phase 1971 To 1990.md b/spaces/cihyFjudo/fairness-paper-search/Mobilisation Phase 1971 To 1990.md deleted file mode 100644 index 43c25f68bd2c9913818d125a40a5eb2813cf851d..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Mobilisation Phase 1971 To 1990.md +++ /dev/null @@ -1,13 +0,0 @@ -
-

Understanding the mobilisation of trapped globules of non-wetting phase during two-phase flow has been the aim of numerous studies. However, the driving forces for the mobilisation of the trapped phases are still not well understood, and there is little information about what happens within a globule before, at the onset of, and during mobilisation. In this work, we used micro-particle tracking velocimetry in a micro-fluidic model to visualise the velocity distributions inside the trapped phase globules prior to and during mobilisation. To this end, time-averaged and instantaneous velocity vectors were determined using fluorescence microscopy. As a porous medium, we used a polydimethylsiloxane (PDMS) micro-model with a well-defined pore structure, in which drainage and imbibition experiments were conducted. Three different geometries of trapped non-wetting globules, namely droplets, blobs and ganglia, were investigated. We observed internal circulations inside the trapped phase globules, leading to the formation of vortices. The direction of circulating flow within a globule is dictated by the drag force exerted on it by the flowing wetting phase. This is illustrated by calculating and analysing the drag force (per unit area) along the fluid-fluid interfaces. In the case of droplets and blobs, only one vortex is formed; the flow field within a ganglion is much more complex and more vortices can be formed. The circulation velocities are largest at the fluid-fluid interfaces along which the wetting phase flows, and decrease towards the middle of the globule. The circulation velocities increased proportionally with the average wetting phase velocity (or capillary number). The vortices remain stable as long as the globules are trapped, start to change at the onset of mobilisation and disappear while the globules move; they reappear when the globules get stranded. Droplets are less prone to mobilisation; blobs get mobilised as a whole, while ganglia may get ruptured and get mobilised only partially.

-

Trapping and mobilisation of the trapped non-wetting phase have been the subject of numerous studies. In these studies, the behaviour of the trapped phase has been linked to a variety of parameters, such as the capillary number, the viscosity ratio and the fluid topology. Here, we define the capillary number as the ratio of the product of the average wetting phase flow velocity and its viscosity to the fluid-fluid interfacial tension. It is observed that the mobilisation or complete removal of ganglia from the host porous medium is possible only if the wetting phase flows at high capillary numbers2,3,4,5,6,7,8. The effect of the capillary number, Ca (usually defined in terms of the wetting phase), on trapping and mobilisation of ganglia has been extensively studied1,4,5,9. Experiments have shown that increasing Ca leads to a higher chance of ganglia mobilisation, regardless of other physical and flow parameters10. It is reported that, for mobilisation of the trapped non-wetting phase to occur, the imposed capillary number must be 25 times larger than the capillary number at which trapping occurred4. In the case of sandstone, in order to maximise recovery of the trapped non-wetting phase, the applied Ca should be 100 times larger than the capillary number required for trapping5. Figure 1 shows the internal circulation in the form of counter-rotating vortices within a stationary droplet surrounded by another flowing fluid, marked by a red circle, as adopted and modified from Dong & Sau11.
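For reference, the capillary number used throughout this discussion can be written out explicitly. This is a standard formulation matching the verbal definition in the preceding paragraph (average wetting-phase velocity and viscosity over interfacial tension); the symbol names are our own shorthand, not notation taken from the cited studies.

```latex
% Ca: capillary number of the wetting phase
%   \mu_w  : dynamic viscosity of the wetting phase [Pa s]
%   v_w    : average wetting-phase flow velocity [m/s]
%   \sigma : fluid-fluid interfacial tension [N/m]
\mathrm{Ca} = \frac{\mu_w \, v_w}{\sigma}
```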

-

Mobilisation Phase 1971 To 1990





-

Despite the wealth of literature focused on the entrapment and mobilisation of ganglia, there is no experimental study of the momentum exchange between the two fluid phases and its effect on the remobilisation and movement of the non-wetting phase. There are some two-phase systems, albeit not in porous media, where such studies have been carried out. One example is the flow around droplets inside another fluid27,28,29,30,31. In the case of spherical droplets, an internal circulation in the form of counter-rotating vortices, shown in Fig. 1, has been observed11,32,33. This is due to the momentum transfer between the two immiscible fluids, which depends strongly on their viscosity ratio34,35. Similar recirculations have been observed within falling droplets, with their intensity influenced by the resulting drag coefficient36. Another situation analogous to ganglia in porous media is the movement of microfluidic droplets in micro-channels37,38,39,40,41. Experimental and numerical works on liquid-liquid slug flows in capillary channels have shown internal recirculations within both phases, which were affected by capillary forces42, the viscosity ratio between the fluids43,44, the channel flow velocity and the slug size45,46. Low capillary numbers resulted in slower channel flows, and large slugs caused the attenuation of recirculation zones inside the mobilised slugs. There are two fundamental differences between such two-phase flow systems and the case of entrapped non-wetting phases in porous media: one is the presence of capillary forces that can cause the trapping of ganglia, and the other is the blocking of droplets or ganglia by the solid phase. The geometry of the porous medium is, of course, much more complex than a micro-channel, and large local variations in the wetting phase velocity field may exist. Stagnant areas may exist where the wetting phase velocity is close to zero. In such areas, the viscous drag exerted on the fluid-fluid interface would be negligible and thus there would be no momentum exchange between the two fluid phases; these are passive interfaces. In areas where the wetting phase velocity is nonzero, we have active interfaces, where momentum exchange occurs, resulting in circulations within the ganglia.

-

The mobilisation of non-wetting phase globules is controlled by the following four forces: drag forces exerted by the flowing wetting phase, friction exerted by the solid phase, capillary forces and the pressure differences in the wetting phase across a globule. As explained above, the drag force is believed to cause flow circulation within the globule. In order to quantify the drag force, one needs to measure the detailed velocity field in the vicinity of the fluid-fluid interface, within either the wetting phase or the non-wetting phase. In other words, knowledge of the induced flow inside trapped non-wetting globules is important for the overall understanding of their fate. An effective technique for obtaining such information is microscopic particle velocimetry, where fluorescent tracers within the fluids are imaged in order to obtain the fluid velocity field. There are two major approaches: Particle Tracking Velocimetry (PTV) and Particle Imaging Velocimetry (PIV). PTV is a Lagrangian-based approach and provides trajectories and velocity magnitudes of the flowing fluid, whereas PIV is Eulerian-based and gives the velocity contour lines. Reviews of these techniques can be found in the work of Lindken et al.47. In this study, we have developed a micromodel setup and employed microscopic particle tracking velocimetry (μPTV) in order to visualise and quantify the flow inside the trapped non-wetting phase. Our aim is to follow the development of the velocity field within individual droplets, blobs and ganglia as the wetting phase capillary number increases, until the mobilisation of the trapped phase occurs. We quantify the drag force exerted along the fluid-fluid interfaces and discuss its role in determining the direction of circulating flow within a globule. We also discuss the reasons why some non-wetting phase globules mobilise and some do not. Our highly controlled experiment and the detailed information it provides about the velocity field within the trapped phase elements, and the mobilisation of some of them, will also be valuable for the testing and validation of direct pore-scale simulation methods such as Lattice-Boltzmann models.
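To make the PTV idea above concrete, here is a minimal sketch (not code from the study) of the Lagrangian step: velocity vectors are estimated from matched tracer positions in two consecutive frames. The function name, frame interval and pixel size are illustrative assumptions only.

```python
import numpy as np

def ptv_velocities(pos_t0, pos_t1, dt, pixel_size):
    """Estimate Lagrangian velocity vectors from matched tracer positions.

    pos_t0, pos_t1 : (N, 2) arrays with the same N tracers in two consecutive
                     frames, in pixel coordinates (matching already done).
    dt             : time between the two frames, in seconds.
    pixel_size     : physical size of one pixel, in metres.
    Returns an (N, 2) array of velocity vectors in m/s.
    """
    displacement = (pos_t1 - pos_t0) * pixel_size  # convert pixels to metres
    return displacement / dt

# Illustrative call with made-up positions (not data from the experiment):
p0 = np.array([[10.0, 12.0], [40.0, 41.5]])
p1 = np.array([[10.6, 12.1], [40.2, 42.3]])
print(ptv_velocities(p0, p1, dt=1e-3, pixel_size=1e-6))  # m/s per tracer
```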

-

-

Our goal is to quantify and analyse the velocity distributions in the trapped non-wetting phase prior to and during mobilisation. For a better understanding of the process, three different types of trapped bodies are identified and investigated separately: droplets, blobs and ganglia. Trapped fluid bodies smaller than a single pore are known as droplets. Blobs are larger than a droplet and fully occupy a single pore, so they have more contact surfaces with the solid phase; the effect of capillary forces is therefore more pronounced compared to droplets, adding complexity to the momentum transfer and mobilisation processes. Larger trapped non-wetting phase bodies, which occupy more than one pore, are referred to as ganglia. As explained above, the capillary number was increased stepwise until mobilisation of some trapped elements took place; in the cases of droplets and blobs, this covered a range of about two orders of magnitude. We focus on two key features of the process: the evolution of the induced internal flow due to viscous drag, and the mobilisation of the trapped phase.
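The droplet/blob/ganglion terminology introduced here can be summarised as a small decision rule. The snippet below is a toy illustration of those definitions only; the function and argument names are hypothetical and do not come from the paper.

```python
def classify_globule(pores_occupied: int, fills_pore: bool) -> str:
    """Classify a trapped non-wetting globule by its pore occupancy."""
    if pores_occupied > 1:
        return "ganglion"  # occupies more than one pore
    if fills_pore:
        return "blob"      # fully occupies a single pore
    return "droplet"       # smaller than a single pore

print(classify_globule(1, False))  # droplet
print(classify_globule(1, True))   # blob
print(classify_globule(3, True))   # ganglion
```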

-

Breakup of a ganglion is a common occurrence that leads to a partial mobilisation of ganglia. The breakup, as described by Lenormand & Sarr12, is commonly characterised by the deformation of the ganglion and the formation of a non-wetting phase filament. The increase of the local wetting phase velocity and pressure causes the filament to get thinner and eventually to rupture. Due to the rupture, two daughter ganglia are formed. This happened in the case of the ganglion shown in Figs 9 and 10.

-

We performed two-phase flow experiments with a focus on observing the evolution of flow within trapped non-wetting phase globules prior to, during and after their mobilisation. Flow visualisation and quantitative velocity measurements were obtained using Particle Tracking Velocimetry (PTV).

-

The trapped globules can become mobilised due to the momentum transfer from the wetting phase; this occurs in the form of viscous drag as well as a difference in the local wetting phase pressure between the upstream and downstream liquid-liquid interfaces of a globule. Droplets are trapped at a relatively low capillary number and do not mobilise even at high capillary numbers. This is because they can adapt their shape in order to minimise the drag force; moreover, the pressure difference in the wetting phase along the droplet is not large enough to dislodge it. Blobs can get mobilised once the momentum transfer is large enough to dislodge them. In concordance with the literature, Blob-1 was mobilised at a Ca 20 times larger than the trapping Ca5. However, Blob-2 was mobilised at a Ca only 8 times larger than the trapping Ca, which indicates that the local flow conditions are the main controlling factors for mobilisation. A blob trapped near the side walls of the micro-model was less prone to mobilisation than those trapped more towards the middle of the micro-model.

-
-
\ No newline at end of file diff --git a/spaces/cleanmaster/so-vits-svc-akagi/inference_main.py b/spaces/cleanmaster/so-vits-svc-akagi/inference_main.py deleted file mode 100644 index db6f9634bb276097eae82cac1776a76150003660..0000000000000000000000000000000000000000 --- a/spaces/cleanmaster/so-vits-svc-akagi/inference_main.py +++ /dev/null @@ -1,56 +0,0 @@ -import io -import logging -import time -from pathlib import Path - -import librosa -import numpy as np -import soundfile - -from inference import infer_tool -from inference import slicer -from inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - -model_path = "logs/32k/G_174000-Copy1.pth" -config_path = "configs/config.json" -svc_model = Svc(model_path, config_path) -infer_tool.mkdir(["raw", "results"]) - -# 支持多个wav文件,放在raw文件夹下 -clean_names = ["君の知らない物語-src"] -trans = [-5] # 音高调整,支持正负(半音) -spk_list = ['yunhao'] # 每次同时合成多语者音色 -slice_db = -40 # 默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50 -wav_format = 'flac' # 音频输出格式 - -infer_tool.fill_a_to_b(trans, clean_names) -for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." not in raw_audio_path: - raw_audio_path += ".wav" - infer_tool.format_wav(raw_audio_path) - wav_path = Path(raw_audio_path).with_suffix('.wav') - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - - for spk in spk_list: - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - else: - out_audio, out_sr = svc_model.infer(spk, tran, raw_path) - _audio = out_audio.cpu().numpy() - audio.extend(list(_audio)) - - res_path = f'./results/{clean_name}_{tran}key_{spk}.{wav_format}' - soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format) diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/chunk.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/chunk.py deleted file mode 100644 index 076cbc4370b4471c2074cade279250a3ebec9041..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/chunk.py +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import annotations - -import math - - -def calc_chunk_sizes( - chunk_size: int | tuple[int, int] | None, - chunk_count: int | tuple[int, int] | None, - total_chunk_count: int | None, - ny: int, - nx: int, -) -> tuple[int, int]: - """Calculate chunk sizes. - - Args: - chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same - size in both directions if only one is specified. - chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the - same count in both irections if only one is specified. - total_chunk_count (int, optional): Total number of chunks. - ny (int): Number of grid points in y-direction. - nx (int): Number of grid points in x-direction. - - Return: - tuple(int, int): Chunk sizes (y_chunk_size, x_chunk_size). - - Note: - A maximum of one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` may be - specified. 
- """ - if sum([chunk_size is not None, chunk_count is not None, total_chunk_count is not None]) > 1: - raise ValueError("Only one of chunk_size, chunk_count and total_chunk_count should be set") - - if total_chunk_count is not None: - max_chunk_count = (nx-1)*(ny-1) - total_chunk_count = min(max(total_chunk_count, 1), max_chunk_count) - if total_chunk_count == 1: - chunk_size = 0 - elif total_chunk_count == max_chunk_count: - chunk_size = (1, 1) - else: - factors = two_factors(total_chunk_count) - if ny > nx: - chunk_count = factors - else: - chunk_count = (factors[1], factors[0]) - - if chunk_count is not None: - if isinstance(chunk_count, tuple): - y_chunk_count, x_chunk_count = chunk_count - else: - y_chunk_count = x_chunk_count = chunk_count - x_chunk_count = min(max(x_chunk_count, 1), nx-1) - y_chunk_count = min(max(y_chunk_count, 1), ny-1) - chunk_size = (math.ceil((ny-1) / y_chunk_count), math.ceil((nx-1) / x_chunk_count)) - - if chunk_size is None: - y_chunk_size = x_chunk_size = 0 - elif isinstance(chunk_size, tuple): - y_chunk_size, x_chunk_size = chunk_size - else: - y_chunk_size = x_chunk_size = chunk_size - - if x_chunk_size < 0 or y_chunk_size < 0: - raise ValueError("chunk_size cannot be negative") - - return y_chunk_size, x_chunk_size - - -def two_factors(n: int) -> tuple[int, int]: - """Split an integer into two integer factors. - - The two factors will be as close as possible to the sqrt of n, and are returned in decreasing - order. Worst case returns (n, 1). - - Args: - n (int): The integer to factorize. - - Return: - tuple(int, int): The two factors of n, in decreasing order. - """ - i = math.ceil(math.sqrt(n)) - while n % i != 0: - i -= 1 - j = n // i - if i > j: - return i, j - else: - return j, i diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/tz/_factories.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/tz/_factories.py deleted file mode 100644 index f8a65891a023ebf9eb0c24d391ba67541b7133f1..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/dateutil/tz/_factories.py +++ /dev/null @@ -1,80 +0,0 @@ -from datetime import timedelta -import weakref -from collections import OrderedDict - -from six.moves import _thread - - -class _TzSingleton(type): - def __init__(cls, *args, **kwargs): - cls.__instance = None - super(_TzSingleton, cls).__init__(*args, **kwargs) - - def __call__(cls): - if cls.__instance is None: - cls.__instance = super(_TzSingleton, cls).__call__() - return cls.__instance - - -class _TzFactory(type): - def instance(cls, *args, **kwargs): - """Alternate constructor that returns a fresh instance""" - return type.__call__(cls, *args, **kwargs) - - -class _TzOffsetFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - cls._cache_lock = _thread.allocate_lock() - - def __call__(cls, name, offset): - if isinstance(offset, timedelta): - key = (name, offset.total_seconds()) - else: - key = (name, offset) - - instance = cls.__instances.get(key, None) - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(name, offset)) - - # This lock may not be necessary in Python 3. 
See GH issue #901 - with cls._cache_lock: - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - # Remove an item if the strong cache is overpopulated - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - - -class _TzStrFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - cls.__cache_lock = _thread.allocate_lock() - - def __call__(cls, s, posix_offset=False): - key = (s, posix_offset) - instance = cls.__instances.get(key, None) - - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(s, posix_offset)) - - # This lock may not be necessary in Python 3. See GH issue #901 - with cls.__cache_lock: - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - # Remove an item if the strong cache is overpopulated - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - diff --git a/spaces/cncn102/bingo1/tests/parse.ts b/spaces/cncn102/bingo1/tests/parse.ts deleted file mode 100644 index 92940fe6315f1d7cb2b267ba5e5a7e26460a1de3..0000000000000000000000000000000000000000 --- a/spaces/cncn102/bingo1/tests/parse.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { promises as fs } from 'fs' -import { join } from 'path' -import { parseHeadersFromCurl } from '@/lib/utils' - -(async () => { - const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8') - const headers = parseHeadersFromCurl(content) - console.log(headers) - - const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8') - const cmdHeaders = parseHeadersFromCurl(cmdContent) - console.log(cmdHeaders) -})() diff --git a/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/gcc/stdatomic.h b/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/gcc/stdatomic.h deleted file mode 100644 index e13ed0e068b8fb50c7a69ca19a2600dae31a5a21..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/compat/atomics/gcc/stdatomic.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* - * based on vlc_atomic.h from VLC - * Copyright (C) 2010 Rémi Denis-Courmont - */ - -#ifndef COMPAT_ATOMICS_GCC_STDATOMIC_H -#define COMPAT_ATOMICS_GCC_STDATOMIC_H - -#include -#include - -#define ATOMIC_FLAG_INIT 0 - -#define ATOMIC_VAR_INIT(value) (value) - -#define atomic_init(obj, value) \ -do { \ - *(obj) = (value); \ -} while(0) - -#define kill_dependency(y) ((void)0) - -#define atomic_thread_fence(order) \ - __sync_synchronize() - -#define atomic_signal_fence(order) \ - ((void)0) - -#define atomic_is_lock_free(obj) 0 - -typedef _Bool atomic_flag; -typedef _Bool atomic_bool; -typedef char atomic_char; -typedef signed char atomic_schar; -typedef unsigned char atomic_uchar; -typedef short atomic_short; -typedef unsigned short atomic_ushort; -typedef int atomic_int; -typedef unsigned int atomic_uint; -typedef long atomic_long; -typedef unsigned long atomic_ulong; -typedef long long atomic_llong; -typedef unsigned long long atomic_ullong; -typedef wchar_t atomic_wchar_t; -typedef int_least8_t atomic_int_least8_t; -typedef uint_least8_t atomic_uint_least8_t; -typedef int_least16_t atomic_int_least16_t; -typedef uint_least16_t atomic_uint_least16_t; -typedef int_least32_t atomic_int_least32_t; -typedef uint_least32_t atomic_uint_least32_t; -typedef int_least64_t atomic_int_least64_t; -typedef uint_least64_t atomic_uint_least64_t; -typedef int_fast8_t atomic_int_fast8_t; -typedef uint_fast8_t atomic_uint_fast8_t; -typedef int_fast16_t atomic_int_fast16_t; -typedef uint_fast16_t atomic_uint_fast16_t; -typedef int_fast32_t atomic_int_fast32_t; -typedef uint_fast32_t atomic_uint_fast32_t; -typedef int_fast64_t atomic_int_fast64_t; -typedef uint_fast64_t atomic_uint_fast64_t; -typedef intptr_t atomic_intptr_t; -typedef uintptr_t atomic_uintptr_t; -typedef size_t atomic_size_t; -typedef ptrdiff_t atomic_ptrdiff_t; -typedef intmax_t atomic_intmax_t; -typedef uintmax_t atomic_uintmax_t; - -#define atomic_store(object, desired) \ -do { \ - *(object) = (desired); \ - __sync_synchronize(); \ -} while (0) - -#define atomic_store_explicit(object, desired, order) \ - atomic_store(object, desired) - -#define atomic_load(object) \ - (__sync_synchronize(), *(object)) - -#define atomic_load_explicit(object, order) \ - atomic_load(object) - -#define atomic_exchange(object, desired) \ -({ \ - __typeof__(object) _obj = (object); \ - __typeof__(*object) _old; \ - do \ - _old = atomic_load(_obj); \ - while (!__sync_bool_compare_and_swap(_obj, _old, (desired))); \ - _old; \ -}) - -#define atomic_exchange_explicit(object, desired, order) \ - atomic_exchange(object, desired) - -#define atomic_compare_exchange_strong(object, expected, desired) \ -({ \ - __typeof__(object) _exp = (expected); \ - __typeof__(*object) _old = *_exp; \ - *_exp = __sync_val_compare_and_swap((object), _old, (desired)); \ - *_exp == _old; \ -}) - -#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \ - atomic_compare_exchange_strong(object, expected, desired) - -#define atomic_compare_exchange_weak(object, expected, desired) \ - atomic_compare_exchange_strong(object, expected, desired) - -#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \ - atomic_compare_exchange_weak(object, expected, desired) - -#define atomic_fetch_add(object, 
operand) \ - __sync_fetch_and_add(object, operand) - -#define atomic_fetch_add_explicit(object, operand, order) \ - atomic_fetch_add(object, operand) - -#define atomic_fetch_sub(object, operand) \ - __sync_fetch_and_sub(object, operand) - -#define atomic_fetch_sub_explicit(object, operand, order) \ - atomic_fetch_sub(object, operand) - -#define atomic_fetch_or(object, operand) \ - __sync_fetch_and_or(object, operand) - -#define atomic_fetch_or_explicit(object, operand, order) \ - atomic_fetch_or(object, operand) - -#define atomic_fetch_xor(object, operand) \ - __sync_fetch_and_xor(object, operand) - -#define atomic_fetch_xor_explicit(object, operand, order) \ - atomic_fetch_xor(object, operand) - -#define atomic_fetch_and(object, operand) \ - __sync_fetch_and_and(object, operand) - -#define atomic_fetch_and_explicit(object, operand, order) \ - atomic_fetch_and(object, operand) - -#define atomic_flag_test_and_set(object) \ - atomic_exchange(object, 1) - -#define atomic_flag_test_and_set_explicit(object, order) \ - atomic_flag_test_and_set(object) - -#define atomic_flag_clear(object) \ - atomic_store(object, 0) - -#define atomic_flag_clear_explicit(object, order) \ - atomic_flag_clear(object) - -#endif /* COMPAT_ATOMICS_GCC_STDATOMIC_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/fftools/ffplay.c b/spaces/colakin/video-generater/public/ffmpeg/fftools/ffplay.c deleted file mode 100644 index 15fd64497403dbb1ff9aaf3ad73130a410544afd..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/fftools/ffplay.c +++ /dev/null @@ -1,3719 +0,0 @@ -/* - * Copyright (c) 2003 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * simple media player based on the FFmpeg libraries - */ - -#include "config.h" -#include "config_components.h" -#include -#include -#include -#include -#include - -#include "libavutil/avstring.h" -#include "libavutil/channel_layout.h" -#include "libavutil/eval.h" -#include "libavutil/mathematics.h" -#include "libavutil/pixdesc.h" -#include "libavutil/imgutils.h" -#include "libavutil/dict.h" -#include "libavutil/fifo.h" -#include "libavutil/parseutils.h" -#include "libavutil/samplefmt.h" -#include "libavutil/time.h" -#include "libavutil/bprint.h" -#include "libavformat/avformat.h" -#include "libavdevice/avdevice.h" -#include "libswscale/swscale.h" -#include "libavutil/opt.h" -#include "libavcodec/avfft.h" -#include "libswresample/swresample.h" - -#include "libavfilter/avfilter.h" -#include "libavfilter/buffersink.h" -#include "libavfilter/buffersrc.h" - -#include -#include - -#include "cmdutils.h" -#include "opt_common.h" - -const char program_name[] = "ffplay"; -const int program_birth_year = 2003; - -#define MAX_QUEUE_SIZE (15 * 1024 * 1024) -#define MIN_FRAMES 25 -#define EXTERNAL_CLOCK_MIN_FRAMES 2 -#define EXTERNAL_CLOCK_MAX_FRAMES 10 - -/* Minimum SDL audio buffer size, in samples. */ -#define SDL_AUDIO_MIN_BUFFER_SIZE 512 -/* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */ -#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30 - -/* Step size for volume control in dB */ -#define SDL_VOLUME_STEP (0.75) - -/* no AV sync correction is done if below the minimum AV sync threshold */ -#define AV_SYNC_THRESHOLD_MIN 0.04 -/* AV sync correction is done if above the maximum AV sync threshold */ -#define AV_SYNC_THRESHOLD_MAX 0.1 -/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */ -#define AV_SYNC_FRAMEDUP_THRESHOLD 0.1 -/* no AV correction is done if too big error */ -#define AV_NOSYNC_THRESHOLD 10.0 - -/* maximum audio speed change to get correct sync */ -#define SAMPLE_CORRECTION_PERCENT_MAX 10 - -/* external clock speed adjustment constants for realtime sources based on buffer fullness */ -#define EXTERNAL_CLOCK_SPEED_MIN 0.900 -#define EXTERNAL_CLOCK_SPEED_MAX 1.010 -#define EXTERNAL_CLOCK_SPEED_STEP 0.001 - -/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */ -#define AUDIO_DIFF_AVG_NB 20 - -/* polls for possible required screen refresh at least this often, should be less than 1/fps */ -#define REFRESH_RATE 0.01 - -/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */ -/* TODO: We assume that a decoded and resampled frame fits into this buffer */ -#define SAMPLE_ARRAY_SIZE (8 * 65536) - -#define CURSOR_HIDE_DELAY 1000000 - -#define USE_ONEPASS_SUBTITLE_RENDER 1 - -typedef struct MyAVPacketList { - AVPacket *pkt; - int serial; -} MyAVPacketList; - -typedef struct PacketQueue { - AVFifo *pkt_list; - int nb_packets; - int size; - int64_t duration; - int abort_request; - int serial; - SDL_mutex *mutex; - SDL_cond *cond; -} PacketQueue; - -#define VIDEO_PICTURE_QUEUE_SIZE 3 -#define SUBPICTURE_QUEUE_SIZE 16 -#define SAMPLE_QUEUE_SIZE 9 -#define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE)) - -typedef struct AudioParams { - int freq; - AVChannelLayout ch_layout; - enum 
AVSampleFormat fmt; - int frame_size; - int bytes_per_sec; -} AudioParams; - -typedef struct Clock { - double pts; /* clock base */ - double pts_drift; /* clock base minus time at which we updated the clock */ - double last_updated; - double speed; - int serial; /* clock is based on a packet with this serial */ - int paused; - int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */ -} Clock; - -typedef struct FrameData { - int64_t pkt_pos; -} FrameData; - -/* Common struct for handling all types of decoded data and allocated render buffers. */ -typedef struct Frame { - AVFrame *frame; - AVSubtitle sub; - int serial; - double pts; /* presentation timestamp for the frame */ - double duration; /* estimated duration of the frame */ - int64_t pos; /* byte position of the frame in the input file */ - int width; - int height; - int format; - AVRational sar; - int uploaded; - int flip_v; -} Frame; - -typedef struct FrameQueue { - Frame queue[FRAME_QUEUE_SIZE]; - int rindex; - int windex; - int size; - int max_size; - int keep_last; - int rindex_shown; - SDL_mutex *mutex; - SDL_cond *cond; - PacketQueue *pktq; -} FrameQueue; - -enum { - AV_SYNC_AUDIO_MASTER, /* default choice */ - AV_SYNC_VIDEO_MASTER, - AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */ -}; - -typedef struct Decoder { - AVPacket *pkt; - PacketQueue *queue; - AVCodecContext *avctx; - int pkt_serial; - int finished; - int packet_pending; - SDL_cond *empty_queue_cond; - int64_t start_pts; - AVRational start_pts_tb; - int64_t next_pts; - AVRational next_pts_tb; - SDL_Thread *decoder_tid; -} Decoder; - -typedef struct VideoState { - SDL_Thread *read_tid; - const AVInputFormat *iformat; - int abort_request; - int force_refresh; - int paused; - int last_paused; - int queue_attachments_req; - int seek_req; - int seek_flags; - int64_t seek_pos; - int64_t seek_rel; - int read_pause_return; - AVFormatContext *ic; - int realtime; - - Clock audclk; - Clock vidclk; - Clock extclk; - - FrameQueue pictq; - FrameQueue subpq; - FrameQueue sampq; - - Decoder auddec; - Decoder viddec; - Decoder subdec; - - int audio_stream; - - int av_sync_type; - - double audio_clock; - int audio_clock_serial; - double audio_diff_cum; /* used for AV difference average computation */ - double audio_diff_avg_coef; - double audio_diff_threshold; - int audio_diff_avg_count; - AVStream *audio_st; - PacketQueue audioq; - int audio_hw_buf_size; - uint8_t *audio_buf; - uint8_t *audio_buf1; - unsigned int audio_buf_size; /* in bytes */ - unsigned int audio_buf1_size; - int audio_buf_index; /* in bytes */ - int audio_write_buf_size; - int audio_volume; - int muted; - struct AudioParams audio_src; - struct AudioParams audio_filter_src; - struct AudioParams audio_tgt; - struct SwrContext *swr_ctx; - int frame_drops_early; - int frame_drops_late; - - enum ShowMode { - SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB - } show_mode; - int16_t sample_array[SAMPLE_ARRAY_SIZE]; - int sample_array_index; - int last_i_start; - RDFTContext *rdft; - int rdft_bits; - FFTSample *rdft_data; - int xpos; - double last_vis_time; - SDL_Texture *vis_texture; - SDL_Texture *sub_texture; - SDL_Texture *vid_texture; - - int subtitle_stream; - AVStream *subtitle_st; - PacketQueue subtitleq; - - double frame_timer; - double frame_last_returned_time; - double frame_last_filter_delay; - int video_stream; - AVStream *video_st; - PacketQueue videoq; - double max_frame_duration; // maximum duration of a frame 
- above this, we consider the jump a timestamp discontinuity - struct SwsContext *sub_convert_ctx; - int eof; - - char *filename; - int width, height, xleft, ytop; - int step; - - int vfilter_idx; - AVFilterContext *in_video_filter; // the first filter in the video chain - AVFilterContext *out_video_filter; // the last filter in the video chain - AVFilterContext *in_audio_filter; // the first filter in the audio chain - AVFilterContext *out_audio_filter; // the last filter in the audio chain - AVFilterGraph *agraph; // audio filter graph - - int last_video_stream, last_audio_stream, last_subtitle_stream; - - SDL_cond *continue_read_thread; -} VideoState; - -/* options specified by the user */ -static const AVInputFormat *file_iformat; -static const char *input_filename; -static const char *window_title; -static int default_width = 640; -static int default_height = 480; -static int screen_width = 0; -static int screen_height = 0; -static int screen_left = SDL_WINDOWPOS_CENTERED; -static int screen_top = SDL_WINDOWPOS_CENTERED; -static int audio_disable; -static int video_disable; -static int subtitle_disable; -static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0}; -static int seek_by_bytes = -1; -static float seek_interval = 10; -static int display_disable; -static int borderless; -static int alwaysontop; -static int startup_volume = 100; -static int show_status = -1; -static int av_sync_type = AV_SYNC_AUDIO_MASTER; -static int64_t start_time = AV_NOPTS_VALUE; -static int64_t duration = AV_NOPTS_VALUE; -static int fast = 0; -static int genpts = 0; -static int lowres = 0; -static int decoder_reorder_pts = -1; -static int autoexit; -static int exit_on_keydown; -static int exit_on_mousedown; -static int loop = 1; -static int framedrop = -1; -static int infinite_buffer = -1; -static enum ShowMode show_mode = SHOW_MODE_NONE; -static const char *audio_codec_name; -static const char *subtitle_codec_name; -static const char *video_codec_name; -double rdftspeed = 0.02; -static int64_t cursor_last_shown; -static int cursor_hidden = 0; -static const char **vfilters_list = NULL; -static int nb_vfilters = 0; -static char *afilters = NULL; -static int autorotate = 1; -static int find_stream_info = 1; -static int filter_nbthreads = 0; - -/* current context */ -static int is_full_screen; -static int64_t audio_callback_time; - -#define FF_QUIT_EVENT (SDL_USEREVENT + 2) - -static SDL_Window *window; -static SDL_Renderer *renderer; -static SDL_RendererInfo renderer_info = {0}; -static SDL_AudioDeviceID audio_dev; - -static const struct TextureFormatEntry { - enum AVPixelFormat format; - int texture_fmt; -} sdl_texture_format_map[] = { - { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 }, - { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 }, - { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 }, - { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 }, - { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 }, - { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 }, - { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 }, - { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 }, - { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 }, - { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 }, - { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 }, - { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 }, - { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 }, - { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 }, - { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 }, - { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 }, - { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV }, - { AV_PIX_FMT_YUYV422, 
SDL_PIXELFORMAT_YUY2 }, - { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY }, - { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN }, -}; - -static int opt_add_vfilter(void *optctx, const char *opt, const char *arg) -{ - GROW_ARRAY(vfilters_list, nb_vfilters); - vfilters_list[nb_vfilters - 1] = arg; - return 0; -} - -static inline -int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, - enum AVSampleFormat fmt2, int64_t channel_count2) -{ - /* If channel count == 1, planar and non-planar formats are the same */ - if (channel_count1 == 1 && channel_count2 == 1) - return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2); - else - return channel_count1 != channel_count2 || fmt1 != fmt2; -} - -static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt) -{ - MyAVPacketList pkt1; - int ret; - - if (q->abort_request) - return -1; - - - pkt1.pkt = pkt; - pkt1.serial = q->serial; - - ret = av_fifo_write(q->pkt_list, &pkt1, 1); - if (ret < 0) - return ret; - q->nb_packets++; - q->size += pkt1.pkt->size + sizeof(pkt1); - q->duration += pkt1.pkt->duration; - /* XXX: should duplicate packet data in DV case */ - SDL_CondSignal(q->cond); - return 0; -} - -static int packet_queue_put(PacketQueue *q, AVPacket *pkt) -{ - AVPacket *pkt1; - int ret; - - pkt1 = av_packet_alloc(); - if (!pkt1) { - av_packet_unref(pkt); - return -1; - } - av_packet_move_ref(pkt1, pkt); - - SDL_LockMutex(q->mutex); - ret = packet_queue_put_private(q, pkt1); - SDL_UnlockMutex(q->mutex); - - if (ret < 0) - av_packet_free(&pkt1); - - return ret; -} - -static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index) -{ - pkt->stream_index = stream_index; - return packet_queue_put(q, pkt); -} - -/* packet queue handling */ -static int packet_queue_init(PacketQueue *q) -{ - memset(q, 0, sizeof(PacketQueue)); - q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW); - if (!q->pkt_list) - return AVERROR(ENOMEM); - q->mutex = SDL_CreateMutex(); - if (!q->mutex) { - av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError()); - return AVERROR(ENOMEM); - } - q->cond = SDL_CreateCond(); - if (!q->cond) { - av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError()); - return AVERROR(ENOMEM); - } - q->abort_request = 1; - return 0; -} - -static void packet_queue_flush(PacketQueue *q) -{ - MyAVPacketList pkt1; - - SDL_LockMutex(q->mutex); - while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) - av_packet_free(&pkt1.pkt); - q->nb_packets = 0; - q->size = 0; - q->duration = 0; - q->serial++; - SDL_UnlockMutex(q->mutex); -} - -static void packet_queue_destroy(PacketQueue *q) -{ - packet_queue_flush(q); - av_fifo_freep2(&q->pkt_list); - SDL_DestroyMutex(q->mutex); - SDL_DestroyCond(q->cond); -} - -static void packet_queue_abort(PacketQueue *q) -{ - SDL_LockMutex(q->mutex); - - q->abort_request = 1; - - SDL_CondSignal(q->cond); - - SDL_UnlockMutex(q->mutex); -} - -static void packet_queue_start(PacketQueue *q) -{ - SDL_LockMutex(q->mutex); - q->abort_request = 0; - q->serial++; - SDL_UnlockMutex(q->mutex); -} - -/* return < 0 if aborted, 0 if no packet and > 0 if packet. 
*/ -static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial) -{ - MyAVPacketList pkt1; - int ret; - - SDL_LockMutex(q->mutex); - - for (;;) { - if (q->abort_request) { - ret = -1; - break; - } - - if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) { - q->nb_packets--; - q->size -= pkt1.pkt->size + sizeof(pkt1); - q->duration -= pkt1.pkt->duration; - av_packet_move_ref(pkt, pkt1.pkt); - if (serial) - *serial = pkt1.serial; - av_packet_free(&pkt1.pkt); - ret = 1; - break; - } else if (!block) { - ret = 0; - break; - } else { - SDL_CondWait(q->cond, q->mutex); - } - } - SDL_UnlockMutex(q->mutex); - return ret; -} - -static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) { - memset(d, 0, sizeof(Decoder)); - d->pkt = av_packet_alloc(); - if (!d->pkt) - return AVERROR(ENOMEM); - d->avctx = avctx; - d->queue = queue; - d->empty_queue_cond = empty_queue_cond; - d->start_pts = AV_NOPTS_VALUE; - d->pkt_serial = -1; - return 0; -} - -static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) { - int ret = AVERROR(EAGAIN); - - for (;;) { - if (d->queue->serial == d->pkt_serial) { - do { - if (d->queue->abort_request) - return -1; - - switch (d->avctx->codec_type) { - case AVMEDIA_TYPE_VIDEO: - ret = avcodec_receive_frame(d->avctx, frame); - if (ret >= 0) { - if (decoder_reorder_pts == -1) { - frame->pts = frame->best_effort_timestamp; - } else if (!decoder_reorder_pts) { - frame->pts = frame->pkt_dts; - } - } - break; - case AVMEDIA_TYPE_AUDIO: - ret = avcodec_receive_frame(d->avctx, frame); - if (ret >= 0) { - AVRational tb = (AVRational){1, frame->sample_rate}; - if (frame->pts != AV_NOPTS_VALUE) - frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb); - else if (d->next_pts != AV_NOPTS_VALUE) - frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb); - if (frame->pts != AV_NOPTS_VALUE) { - d->next_pts = frame->pts + frame->nb_samples; - d->next_pts_tb = tb; - } - } - break; - } - if (ret == AVERROR_EOF) { - d->finished = d->pkt_serial; - avcodec_flush_buffers(d->avctx); - return 0; - } - if (ret >= 0) - return 1; - } while (ret != AVERROR(EAGAIN)); - } - - do { - if (d->queue->nb_packets == 0) - SDL_CondSignal(d->empty_queue_cond); - if (d->packet_pending) { - d->packet_pending = 0; - } else { - int old_serial = d->pkt_serial; - if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0) - return -1; - if (old_serial != d->pkt_serial) { - avcodec_flush_buffers(d->avctx); - d->finished = 0; - d->next_pts = d->start_pts; - d->next_pts_tb = d->start_pts_tb; - } - } - if (d->queue->serial == d->pkt_serial) - break; - av_packet_unref(d->pkt); - } while (1); - - if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) { - int got_frame = 0; - ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt); - if (ret < 0) { - ret = AVERROR(EAGAIN); - } else { - if (got_frame && !d->pkt->data) { - d->packet_pending = 1; - } - ret = got_frame ? 0 : (d->pkt->data ? 
AVERROR(EAGAIN) : AVERROR_EOF); - } - av_packet_unref(d->pkt); - } else { - if (d->pkt->buf && !d->pkt->opaque_ref) { - FrameData *fd; - - d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd)); - if (!d->pkt->opaque_ref) - return AVERROR(ENOMEM); - fd = (FrameData*)d->pkt->opaque_ref->data; - fd->pkt_pos = d->pkt->pos; - } - - if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) { - av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n"); - d->packet_pending = 1; - } else { - av_packet_unref(d->pkt); - } - } - } -} - -static void decoder_destroy(Decoder *d) { - av_packet_free(&d->pkt); - avcodec_free_context(&d->avctx); -} - -static void frame_queue_unref_item(Frame *vp) -{ - av_frame_unref(vp->frame); - avsubtitle_free(&vp->sub); -} - -static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last) -{ - int i; - memset(f, 0, sizeof(FrameQueue)); - if (!(f->mutex = SDL_CreateMutex())) { - av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError()); - return AVERROR(ENOMEM); - } - if (!(f->cond = SDL_CreateCond())) { - av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError()); - return AVERROR(ENOMEM); - } - f->pktq = pktq; - f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE); - f->keep_last = !!keep_last; - for (i = 0; i < f->max_size; i++) - if (!(f->queue[i].frame = av_frame_alloc())) - return AVERROR(ENOMEM); - return 0; -} - -static void frame_queue_destory(FrameQueue *f) -{ - int i; - for (i = 0; i < f->max_size; i++) { - Frame *vp = &f->queue[i]; - frame_queue_unref_item(vp); - av_frame_free(&vp->frame); - } - SDL_DestroyMutex(f->mutex); - SDL_DestroyCond(f->cond); -} - -static void frame_queue_signal(FrameQueue *f) -{ - SDL_LockMutex(f->mutex); - SDL_CondSignal(f->cond); - SDL_UnlockMutex(f->mutex); -} - -static Frame *frame_queue_peek(FrameQueue *f) -{ - return &f->queue[(f->rindex + f->rindex_shown) % f->max_size]; -} - -static Frame *frame_queue_peek_next(FrameQueue *f) -{ - return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size]; -} - -static Frame *frame_queue_peek_last(FrameQueue *f) -{ - return &f->queue[f->rindex]; -} - -static Frame *frame_queue_peek_writable(FrameQueue *f) -{ - /* wait until we have space to put a new frame */ - SDL_LockMutex(f->mutex); - while (f->size >= f->max_size && - !f->pktq->abort_request) { - SDL_CondWait(f->cond, f->mutex); - } - SDL_UnlockMutex(f->mutex); - - if (f->pktq->abort_request) - return NULL; - - return &f->queue[f->windex]; -} - -static Frame *frame_queue_peek_readable(FrameQueue *f) -{ - /* wait until we have a readable a new frame */ - SDL_LockMutex(f->mutex); - while (f->size - f->rindex_shown <= 0 && - !f->pktq->abort_request) { - SDL_CondWait(f->cond, f->mutex); - } - SDL_UnlockMutex(f->mutex); - - if (f->pktq->abort_request) - return NULL; - - return &f->queue[(f->rindex + f->rindex_shown) % f->max_size]; -} - -static void frame_queue_push(FrameQueue *f) -{ - if (++f->windex == f->max_size) - f->windex = 0; - SDL_LockMutex(f->mutex); - f->size++; - SDL_CondSignal(f->cond); - SDL_UnlockMutex(f->mutex); -} - -static void frame_queue_next(FrameQueue *f) -{ - if (f->keep_last && !f->rindex_shown) { - f->rindex_shown = 1; - return; - } - frame_queue_unref_item(&f->queue[f->rindex]); - if (++f->rindex == f->max_size) - f->rindex = 0; - SDL_LockMutex(f->mutex); - f->size--; - SDL_CondSignal(f->cond); - SDL_UnlockMutex(f->mutex); -} - -/* return the number of undisplayed frames in the queue */ -static int 
frame_queue_nb_remaining(FrameQueue *f) -{ - return f->size - f->rindex_shown; -} - -/* return last shown position */ -static int64_t frame_queue_last_pos(FrameQueue *f) -{ - Frame *fp = &f->queue[f->rindex]; - if (f->rindex_shown && fp->serial == f->pktq->serial) - return fp->pos; - else - return -1; -} - -static void decoder_abort(Decoder *d, FrameQueue *fq) -{ - packet_queue_abort(d->queue); - frame_queue_signal(fq); - SDL_WaitThread(d->decoder_tid, NULL); - d->decoder_tid = NULL; - packet_queue_flush(d->queue); -} - -static inline void fill_rectangle(int x, int y, int w, int h) -{ - SDL_Rect rect; - rect.x = x; - rect.y = y; - rect.w = w; - rect.h = h; - if (w && h) - SDL_RenderFillRect(renderer, &rect); -} - -static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture) -{ - Uint32 format; - int access, w, h; - if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) { - void *pixels; - int pitch; - if (*texture) - SDL_DestroyTexture(*texture); - if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height))) - return -1; - if (SDL_SetTextureBlendMode(*texture, blendmode) < 0) - return -1; - if (init_texture) { - if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0) - return -1; - memset(pixels, 0, pitch * new_height); - SDL_UnlockTexture(*texture); - } - av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format)); - } - return 0; -} - -static void calculate_display_rect(SDL_Rect *rect, - int scr_xleft, int scr_ytop, int scr_width, int scr_height, - int pic_width, int pic_height, AVRational pic_sar) -{ - AVRational aspect_ratio = pic_sar; - int64_t width, height, x, y; - - if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0) - aspect_ratio = av_make_q(1, 1); - - aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height)); - - /* XXX: we suppose the screen has a 1.0 pixel ratio */ - height = scr_height; - width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1; - if (width > scr_width) { - width = scr_width; - height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1; - } - x = (scr_width - width) / 2; - y = (scr_height - height) / 2; - rect->x = scr_xleft + x; - rect->y = scr_ytop + y; - rect->w = FFMAX((int)width, 1); - rect->h = FFMAX((int)height, 1); -} - -static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode) -{ - int i; - *sdl_blendmode = SDL_BLENDMODE_NONE; - *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN; - if (format == AV_PIX_FMT_RGB32 || - format == AV_PIX_FMT_RGB32_1 || - format == AV_PIX_FMT_BGR32 || - format == AV_PIX_FMT_BGR32_1) - *sdl_blendmode = SDL_BLENDMODE_BLEND; - for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) { - if (format == sdl_texture_format_map[i].format) { - *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt; - return; - } - } -} - -static int upload_texture(SDL_Texture **tex, AVFrame *frame) -{ - int ret = 0; - Uint32 sdl_pix_fmt; - SDL_BlendMode sdl_blendmode; - get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode); - if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? 
SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0) - return -1; - switch (sdl_pix_fmt) { - case SDL_PIXELFORMAT_IYUV: - if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) { - ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0], - frame->data[1], frame->linesize[1], - frame->data[2], frame->linesize[2]); - } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) { - ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0], - frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1], - frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]); - } else { - av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n"); - return -1; - } - break; - default: - if (frame->linesize[0] < 0) { - ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]); - } else { - ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]); - } - break; - } - return ret; -} - -static void set_sdl_yuv_conversion_mode(AVFrame *frame) -{ -#if SDL_VERSION_ATLEAST(2,0,8) - SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC; - if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) { - if (frame->color_range == AVCOL_RANGE_JPEG) - mode = SDL_YUV_CONVERSION_JPEG; - else if (frame->colorspace == AVCOL_SPC_BT709) - mode = SDL_YUV_CONVERSION_BT709; - else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M) - mode = SDL_YUV_CONVERSION_BT601; - } - SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */ -#endif -} - -static void video_image_display(VideoState *is) -{ - Frame *vp; - Frame *sp = NULL; - SDL_Rect rect; - - vp = frame_queue_peek_last(&is->pictq); - if (is->subtitle_st) { - if (frame_queue_nb_remaining(&is->subpq) > 0) { - sp = frame_queue_peek(&is->subpq); - - if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) { - if (!sp->uploaded) { - uint8_t* pixels[4]; - int pitch[4]; - int i; - if (!sp->width || !sp->height) { - sp->width = vp->width; - sp->height = vp->height; - } - if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0) - return; - - for (i = 0; i < sp->sub.num_rects; i++) { - AVSubtitleRect *sub_rect = sp->sub.rects[i]; - - sub_rect->x = av_clip(sub_rect->x, 0, sp->width ); - sub_rect->y = av_clip(sub_rect->y, 0, sp->height); - sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x); - sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y); - - is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx, - sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8, - sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA, - 0, NULL, NULL, NULL); - if (!is->sub_convert_ctx) { - av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n"); - return; - } - if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) { - sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize, - 0, sub_rect->h, pixels, pitch); - SDL_UnlockTexture(is->sub_texture); - } - } - sp->uploaded = 1; - } - } else - sp = NULL; - } - } - - calculate_display_rect(&rect, is->xleft, is->ytop, is->width, 
is->height, vp->width, vp->height, vp->sar); - set_sdl_yuv_conversion_mode(vp->frame); - - if (!vp->uploaded) { - if (upload_texture(&is->vid_texture, vp->frame) < 0) { - set_sdl_yuv_conversion_mode(NULL); - return; - } - vp->uploaded = 1; - vp->flip_v = vp->frame->linesize[0] < 0; - } - - SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0); - set_sdl_yuv_conversion_mode(NULL); - if (sp) { -#if USE_ONEPASS_SUBTITLE_RENDER - SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect); -#else - int i; - double xratio = (double)rect.w / (double)sp->width; - double yratio = (double)rect.h / (double)sp->height; - for (i = 0; i < sp->sub.num_rects; i++) { - SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i]; - SDL_Rect target = {.x = rect.x + sub_rect->x * xratio, - .y = rect.y + sub_rect->y * yratio, - .w = sub_rect->w * xratio, - .h = sub_rect->h * yratio}; - SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target); - } -#endif - } -} - -static inline int compute_mod(int a, int b) -{ - return a < 0 ? a%b + b : a%b; -} - -static void video_audio_display(VideoState *s) -{ - int i, i_start, x, y1, y, ys, delay, n, nb_display_channels; - int ch, channels, h, h2; - int64_t time_diff; - int rdft_bits, nb_freq; - - for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++) - ; - nb_freq = 1 << (rdft_bits - 1); - - /* compute display index : center on currently output samples */ - channels = s->audio_tgt.ch_layout.nb_channels; - nb_display_channels = channels; - if (!s->paused) { - int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq); - n = 2 * channels; - delay = s->audio_write_buf_size; - delay /= n; - - /* to be more precise, we take into account the time spent since - the last buffer computation */ - if (audio_callback_time) { - time_diff = av_gettime_relative() - audio_callback_time; - delay -= (time_diff * s->audio_tgt.freq) / 1000000; - } - - delay += 2 * data_used; - if (delay < data_used) - delay = data_used; - - i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE); - if (s->show_mode == SHOW_MODE_WAVES) { - h = INT_MIN; - for (i = 0; i < 1000; i += channels) { - int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE; - int a = s->sample_array[idx]; - int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE]; - int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE]; - int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE]; - int score = a - d; - if (h < score && (b ^ c) < 0) { - h = score; - i_start = idx; - } - } - } - - s->last_i_start = i_start; - } else { - i_start = s->last_i_start; - } - - if (s->show_mode == SHOW_MODE_WAVES) { - SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255); - - /* total height for one channel */ - h = s->height / nb_display_channels; - /* graph height / 2 */ - h2 = (h * 9) / 20; - for (ch = 0; ch < nb_display_channels; ch++) { - i = i_start + ch; - y1 = s->ytop + ch * h + (h / 2); /* position of center line */ - for (x = 0; x < s->width; x++) { - y = (s->sample_array[i] * h2) >> 15; - if (y < 0) { - y = -y; - ys = y1 - y; - } else { - ys = y1; - } - fill_rectangle(s->xleft + x, ys, 1, y); - i += channels; - if (i >= SAMPLE_ARRAY_SIZE) - i -= SAMPLE_ARRAY_SIZE; - } - } - - SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255); - - for (ch = 1; ch < nb_display_channels; ch++) { - y = s->ytop + ch * h; - fill_rectangle(s->xleft, y, s->width, 1); - } - } else { - if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, 
s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0) - return; - - if (s->xpos >= s->width) - s->xpos = 0; - nb_display_channels= FFMIN(nb_display_channels, 2); - if (rdft_bits != s->rdft_bits) { - av_rdft_end(s->rdft); - av_free(s->rdft_data); - s->rdft = av_rdft_init(rdft_bits, DFT_R2C); - s->rdft_bits = rdft_bits; - s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data)); - } - if (!s->rdft || !s->rdft_data){ - av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n"); - s->show_mode = SHOW_MODE_WAVES; - } else { - FFTSample *data[2]; - SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height}; - uint32_t *pixels; - int pitch; - for (ch = 0; ch < nb_display_channels; ch++) { - data[ch] = s->rdft_data + 2 * nb_freq * ch; - i = i_start + ch; - for (x = 0; x < 2 * nb_freq; x++) { - double w = (x-nb_freq) * (1.0 / nb_freq); - data[ch][x] = s->sample_array[i] * (1.0 - w * w); - i += channels; - if (i >= SAMPLE_ARRAY_SIZE) - i -= SAMPLE_ARRAY_SIZE; - } - av_rdft_calc(s->rdft, data[ch]); - } - /* Least efficient way to do this, we should of course - * directly access it but it is more than fast enough. */ - if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) { - pitch >>= 2; - pixels += pitch * s->height; - for (y = 0; y < s->height; y++) { - double w = 1 / sqrt(nb_freq); - int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1])); - int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1])) - : a; - a = FFMIN(a, 255); - b = FFMIN(b, 255); - pixels -= pitch; - *pixels = (a << 16) + (b << 8) + ((a+b) >> 1); - } - SDL_UnlockTexture(s->vis_texture); - } - SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL); - } - if (!s->paused) - s->xpos++; - } -} - -static void stream_component_close(VideoState *is, int stream_index) -{ - AVFormatContext *ic = is->ic; - AVCodecParameters *codecpar; - - if (stream_index < 0 || stream_index >= ic->nb_streams) - return; - codecpar = ic->streams[stream_index]->codecpar; - - switch (codecpar->codec_type) { - case AVMEDIA_TYPE_AUDIO: - decoder_abort(&is->auddec, &is->sampq); - SDL_CloseAudioDevice(audio_dev); - decoder_destroy(&is->auddec); - swr_free(&is->swr_ctx); - av_freep(&is->audio_buf1); - is->audio_buf1_size = 0; - is->audio_buf = NULL; - - if (is->rdft) { - av_rdft_end(is->rdft); - av_freep(&is->rdft_data); - is->rdft = NULL; - is->rdft_bits = 0; - } - break; - case AVMEDIA_TYPE_VIDEO: - decoder_abort(&is->viddec, &is->pictq); - decoder_destroy(&is->viddec); - break; - case AVMEDIA_TYPE_SUBTITLE: - decoder_abort(&is->subdec, &is->subpq); - decoder_destroy(&is->subdec); - break; - default: - break; - } - - ic->streams[stream_index]->discard = AVDISCARD_ALL; - switch (codecpar->codec_type) { - case AVMEDIA_TYPE_AUDIO: - is->audio_st = NULL; - is->audio_stream = -1; - break; - case AVMEDIA_TYPE_VIDEO: - is->video_st = NULL; - is->video_stream = -1; - break; - case AVMEDIA_TYPE_SUBTITLE: - is->subtitle_st = NULL; - is->subtitle_stream = -1; - break; - default: - break; - } -} - -static void stream_close(VideoState *is) -{ - /* XXX: use a special url_shutdown call to abort parse cleanly */ - is->abort_request = 1; - SDL_WaitThread(is->read_tid, NULL); - - /* close each stream */ - if (is->audio_stream >= 0) - stream_component_close(is, is->audio_stream); - if (is->video_stream >= 0) - stream_component_close(is, is->video_stream); - if (is->subtitle_stream >= 0) - stream_component_close(is, is->subtitle_stream); - 
- avformat_close_input(&is->ic); - - packet_queue_destroy(&is->videoq); - packet_queue_destroy(&is->audioq); - packet_queue_destroy(&is->subtitleq); - - /* free all pictures */ - frame_queue_destory(&is->pictq); - frame_queue_destory(&is->sampq); - frame_queue_destory(&is->subpq); - SDL_DestroyCond(is->continue_read_thread); - sws_freeContext(is->sub_convert_ctx); - av_free(is->filename); - if (is->vis_texture) - SDL_DestroyTexture(is->vis_texture); - if (is->vid_texture) - SDL_DestroyTexture(is->vid_texture); - if (is->sub_texture) - SDL_DestroyTexture(is->sub_texture); - av_free(is); -} - -static void do_exit(VideoState *is) -{ - if (is) { - stream_close(is); - } - if (renderer) - SDL_DestroyRenderer(renderer); - if (window) - SDL_DestroyWindow(window); - uninit_opts(); - av_freep(&vfilters_list); - avformat_network_deinit(); - if (show_status) - printf("\n"); - SDL_Quit(); - av_log(NULL, AV_LOG_QUIET, "%s", ""); - exit(0); -} - -static void sigterm_handler(int sig) -{ - exit(123); -} - -static void set_default_window_size(int width, int height, AVRational sar) -{ - SDL_Rect rect; - int max_width = screen_width ? screen_width : INT_MAX; - int max_height = screen_height ? screen_height : INT_MAX; - if (max_width == INT_MAX && max_height == INT_MAX) - max_height = height; - calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar); - default_width = rect.w; - default_height = rect.h; -} - -static int video_open(VideoState *is) -{ - int w,h; - - w = screen_width ? screen_width : default_width; - h = screen_height ? screen_height : default_height; - - if (!window_title) - window_title = input_filename; - SDL_SetWindowTitle(window, window_title); - - SDL_SetWindowSize(window, w, h); - SDL_SetWindowPosition(window, screen_left, screen_top); - if (is_full_screen) - SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP); - SDL_ShowWindow(window); - - is->width = w; - is->height = h; - - return 0; -} - -/* display the current picture, if any */ -static void video_display(VideoState *is) -{ - if (!is->width) - video_open(is); - - SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255); - SDL_RenderClear(renderer); - if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO) - video_audio_display(is); - else if (is->video_st) - video_image_display(is); - SDL_RenderPresent(renderer); -} - -static double get_clock(Clock *c) -{ - if (*c->queue_serial != c->serial) - return NAN; - if (c->paused) { - return c->pts; - } else { - double time = av_gettime_relative() / 1000000.0; - return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed); - } -} - -static void set_clock_at(Clock *c, double pts, int serial, double time) -{ - c->pts = pts; - c->last_updated = time; - c->pts_drift = c->pts - time; - c->serial = serial; -} - -static void set_clock(Clock *c, double pts, int serial) -{ - double time = av_gettime_relative() / 1000000.0; - set_clock_at(c, pts, serial, time); -} - -static void set_clock_speed(Clock *c, double speed) -{ - set_clock(c, get_clock(c), c->serial); - c->speed = speed; -} - -static void init_clock(Clock *c, int *queue_serial) -{ - c->speed = 1.0; - c->paused = 0; - c->queue_serial = queue_serial; - set_clock(c, NAN, -1); -} - -static void sync_clock_to_slave(Clock *c, Clock *slave) -{ - double clock = get_clock(c); - double slave_clock = get_clock(slave); - if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD)) - set_clock(c, slave_clock, slave->serial); -} - -static int get_master_sync_type(VideoState *is) { - if 
(is->av_sync_type == AV_SYNC_VIDEO_MASTER) { - if (is->video_st) - return AV_SYNC_VIDEO_MASTER; - else - return AV_SYNC_AUDIO_MASTER; - } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) { - if (is->audio_st) - return AV_SYNC_AUDIO_MASTER; - else - return AV_SYNC_EXTERNAL_CLOCK; - } else { - return AV_SYNC_EXTERNAL_CLOCK; - } -} - -/* get the current master clock value */ -static double get_master_clock(VideoState *is) -{ - double val; - - switch (get_master_sync_type(is)) { - case AV_SYNC_VIDEO_MASTER: - val = get_clock(&is->vidclk); - break; - case AV_SYNC_AUDIO_MASTER: - val = get_clock(&is->audclk); - break; - default: - val = get_clock(&is->extclk); - break; - } - return val; -} - -static void check_external_clock_speed(VideoState *is) { - if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES || - is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) { - set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP)); - } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) && - (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) { - set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP)); - } else { - double speed = is->extclk.speed; - if (speed != 1.0) - set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed)); - } -} - -/* seek in the stream */ -static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes) -{ - if (!is->seek_req) { - is->seek_pos = pos; - is->seek_rel = rel; - is->seek_flags &= ~AVSEEK_FLAG_BYTE; - if (by_bytes) - is->seek_flags |= AVSEEK_FLAG_BYTE; - is->seek_req = 1; - SDL_CondSignal(is->continue_read_thread); - } -} - -/* pause or resume the video */ -static void stream_toggle_pause(VideoState *is) -{ - if (is->paused) { - is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated; - if (is->read_pause_return != AVERROR(ENOSYS)) { - is->vidclk.paused = 0; - } - set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial); - } - set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial); - is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused; -} - -static void toggle_pause(VideoState *is) -{ - stream_toggle_pause(is); - is->step = 0; -} - -static void toggle_mute(VideoState *is) -{ - is->muted = !is->muted; -} - -static void update_volume(VideoState *is, int sign, double step) -{ - double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0; - int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0)); - is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME); -} - -static void step_to_next_frame(VideoState *is) -{ - /* if the stream is paused unpause it, then step */ - if (is->paused) - stream_toggle_pause(is); - is->step = 1; -} - -static double compute_target_delay(double delay, VideoState *is) -{ - double sync_threshold, diff = 0; - - /* update delay to follow master synchronisation source */ - if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) { - /* if video is slave, we try to correct big delays by - duplicating or deleting a frame */ - diff = get_clock(&is->vidclk) - get_master_clock(is); - - /* skip or repeat frame. We take into account the - delay to compute the threshold. 
I still don't know - if it is the best guess */ - sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay)); - if (!isnan(diff) && fabs(diff) < is->max_frame_duration) { - if (diff <= -sync_threshold) - delay = FFMAX(0, delay + diff); - else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD) - delay = delay + diff; - else if (diff >= sync_threshold) - delay = 2 * delay; - } - } - - av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n", - delay, -diff); - - return delay; -} - -static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) { - if (vp->serial == nextvp->serial) { - double duration = nextvp->pts - vp->pts; - if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration) - return vp->duration; - else - return duration; - } else { - return 0.0; - } -} - -static void update_video_pts(VideoState *is, double pts, int serial) -{ - /* update current video pts */ - set_clock(&is->vidclk, pts, serial); - sync_clock_to_slave(&is->extclk, &is->vidclk); -} - -/* called to display each frame */ -static void video_refresh(void *opaque, double *remaining_time) -{ - VideoState *is = opaque; - double time; - - Frame *sp, *sp2; - - if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime) - check_external_clock_speed(is); - - if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) { - time = av_gettime_relative() / 1000000.0; - if (is->force_refresh || is->last_vis_time + rdftspeed < time) { - video_display(is); - is->last_vis_time = time; - } - *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time); - } - - if (is->video_st) { -retry: - if (frame_queue_nb_remaining(&is->pictq) == 0) { - // nothing to do, no picture to display in the queue - } else { - double last_duration, duration, delay; - Frame *vp, *lastvp; - - /* dequeue the picture */ - lastvp = frame_queue_peek_last(&is->pictq); - vp = frame_queue_peek(&is->pictq); - - if (vp->serial != is->videoq.serial) { - frame_queue_next(&is->pictq); - goto retry; - } - - if (lastvp->serial != vp->serial) - is->frame_timer = av_gettime_relative() / 1000000.0; - - if (is->paused) - goto display; - - /* compute nominal last_duration */ - last_duration = vp_duration(is, lastvp, vp); - delay = compute_target_delay(last_duration, is); - - time= av_gettime_relative()/1000000.0; - if (time < is->frame_timer + delay) { - *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time); - goto display; - } - - is->frame_timer += delay; - if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX) - is->frame_timer = time; - - SDL_LockMutex(is->pictq.mutex); - if (!isnan(vp->pts)) - update_video_pts(is, vp->pts, vp->serial); - SDL_UnlockMutex(is->pictq.mutex); - - if (frame_queue_nb_remaining(&is->pictq) > 1) { - Frame *nextvp = frame_queue_peek_next(&is->pictq); - duration = vp_duration(is, vp, nextvp); - if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){ - is->frame_drops_late++; - frame_queue_next(&is->pictq); - goto retry; - } - } - - if (is->subtitle_st) { - while (frame_queue_nb_remaining(&is->subpq) > 0) { - sp = frame_queue_peek(&is->subpq); - - if (frame_queue_nb_remaining(&is->subpq) > 1) - sp2 = frame_queue_peek_next(&is->subpq); - else - sp2 = NULL; - - if (sp->serial != is->subtitleq.serial - || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000))) - || (sp2 && is->vidclk.pts > (sp2->pts + ((float) 
sp2->sub.start_display_time / 1000)))) - { - if (sp->uploaded) { - int i; - for (i = 0; i < sp->sub.num_rects; i++) { - AVSubtitleRect *sub_rect = sp->sub.rects[i]; - uint8_t *pixels; - int pitch, j; - - if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) { - for (j = 0; j < sub_rect->h; j++, pixels += pitch) - memset(pixels, 0, sub_rect->w << 2); - SDL_UnlockTexture(is->sub_texture); - } - } - } - frame_queue_next(&is->subpq); - } else { - break; - } - } - } - - frame_queue_next(&is->pictq); - is->force_refresh = 1; - - if (is->step && !is->paused) - stream_toggle_pause(is); - } -display: - /* display picture */ - if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown) - video_display(is); - } - is->force_refresh = 0; - if (show_status) { - AVBPrint buf; - static int64_t last_time; - int64_t cur_time; - int aqsize, vqsize, sqsize; - double av_diff; - - cur_time = av_gettime_relative(); - if (!last_time || (cur_time - last_time) >= 30000) { - aqsize = 0; - vqsize = 0; - sqsize = 0; - if (is->audio_st) - aqsize = is->audioq.size; - if (is->video_st) - vqsize = is->videoq.size; - if (is->subtitle_st) - sqsize = is->subtitleq.size; - av_diff = 0; - if (is->audio_st && is->video_st) - av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk); - else if (is->video_st) - av_diff = get_master_clock(is) - get_clock(&is->vidclk); - else if (is->audio_st) - av_diff = get_master_clock(is) - get_clock(&is->audclk); - - av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC); - av_bprintf(&buf, - "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r", - get_master_clock(is), - (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")), - av_diff, - is->frame_drops_early + is->frame_drops_late, - aqsize / 1024, - vqsize / 1024, - sqsize, - is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0, - is->video_st ? 
is->viddec.avctx->pts_correction_num_faulty_pts : 0); - - if (show_status == 1 && AV_LOG_INFO > av_log_get_level()) - fprintf(stderr, "%s", buf.str); - else - av_log(NULL, AV_LOG_INFO, "%s", buf.str); - - fflush(stderr); - av_bprint_finalize(&buf, NULL); - - last_time = cur_time; - } - } -} - -static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial) -{ - Frame *vp; - -#if defined(DEBUG_SYNC) - printf("frame_type=%c pts=%0.3f\n", - av_get_picture_type_char(src_frame->pict_type), pts); -#endif - - if (!(vp = frame_queue_peek_writable(&is->pictq))) - return -1; - - vp->sar = src_frame->sample_aspect_ratio; - vp->uploaded = 0; - - vp->width = src_frame->width; - vp->height = src_frame->height; - vp->format = src_frame->format; - - vp->pts = pts; - vp->duration = duration; - vp->pos = pos; - vp->serial = serial; - - set_default_window_size(vp->width, vp->height, vp->sar); - - av_frame_move_ref(vp->frame, src_frame); - frame_queue_push(&is->pictq); - return 0; -} - -static int get_video_frame(VideoState *is, AVFrame *frame) -{ - int got_picture; - - if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0) - return -1; - - if (got_picture) { - double dpts = NAN; - - if (frame->pts != AV_NOPTS_VALUE) - dpts = av_q2d(is->video_st->time_base) * frame->pts; - - frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame); - - if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) { - if (frame->pts != AV_NOPTS_VALUE) { - double diff = dpts - get_master_clock(is); - if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD && - diff - is->frame_last_filter_delay < 0 && - is->viddec.pkt_serial == is->vidclk.serial && - is->videoq.nb_packets) { - is->frame_drops_early++; - av_frame_unref(frame); - got_picture = 0; - } - } - } - } - - return got_picture; -} - -static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, - AVFilterContext *source_ctx, AVFilterContext *sink_ctx) -{ - int ret, i; - int nb_filters = graph->nb_filters; - AVFilterInOut *outputs = NULL, *inputs = NULL; - - if (filtergraph) { - outputs = avfilter_inout_alloc(); - inputs = avfilter_inout_alloc(); - if (!outputs || !inputs) { - ret = AVERROR(ENOMEM); - goto fail; - } - - outputs->name = av_strdup("in"); - outputs->filter_ctx = source_ctx; - outputs->pad_idx = 0; - outputs->next = NULL; - - inputs->name = av_strdup("out"); - inputs->filter_ctx = sink_ctx; - inputs->pad_idx = 0; - inputs->next = NULL; - - if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0) - goto fail; - } else { - if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0) - goto fail; - } - - /* Reorder the filters to ensure that inputs of the custom filters are merged first */ - for (i = 0; i < graph->nb_filters - nb_filters; i++) - FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]); - - ret = avfilter_graph_config(graph, NULL); -fail: - avfilter_inout_free(&outputs); - avfilter_inout_free(&inputs); - return ret; -} - -static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame) -{ - enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)]; - char sws_flags_str[512] = ""; - char buffersrc_args[256]; - int ret; - AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL; - AVCodecParameters *codecpar = is->video_st->codecpar; - AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL); - 
const AVDictionaryEntry *e = NULL; - int nb_pix_fmts = 0; - int i, j; - - for (i = 0; i < renderer_info.num_texture_formats; i++) { - for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) { - if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) { - pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format; - break; - } - } - } - pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE; - - while ((e = av_dict_iterate(sws_dict, e))) { - if (!strcmp(e->key, "sws_flags")) { - av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value); - } else - av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value); - } - if (strlen(sws_flags_str)) - sws_flags_str[strlen(sws_flags_str)-1] = '\0'; - - graph->scale_sws_opts = av_strdup(sws_flags_str); - - snprintf(buffersrc_args, sizeof(buffersrc_args), - "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", - frame->width, frame->height, frame->format, - is->video_st->time_base.num, is->video_st->time_base.den, - codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1)); - if (fr.num && fr.den) - av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den); - - if ((ret = avfilter_graph_create_filter(&filt_src, - avfilter_get_by_name("buffer"), - "ffplay_buffer", buffersrc_args, NULL, - graph)) < 0) - goto fail; - - ret = avfilter_graph_create_filter(&filt_out, - avfilter_get_by_name("buffersink"), - "ffplay_buffersink", NULL, NULL, graph); - if (ret < 0) - goto fail; - - if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0) - goto fail; - - last_filter = filt_out; - -/* Note: this macro adds a filter before the lastly added filter, so the - * processing order of the filters is in reverse */ -#define INSERT_FILT(name, arg) do { \ - AVFilterContext *filt_ctx; \ - \ - ret = avfilter_graph_create_filter(&filt_ctx, \ - avfilter_get_by_name(name), \ - "ffplay_" name, arg, NULL, graph); \ - if (ret < 0) \ - goto fail; \ - \ - ret = avfilter_link(filt_ctx, 0, last_filter, 0); \ - if (ret < 0) \ - goto fail; \ - \ - last_filter = filt_ctx; \ -} while (0) - - if (autorotate) { - double theta = 0.0; - int32_t *displaymatrix = NULL; - AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX); - if (sd) - displaymatrix = (int32_t *)sd->data; - if (!displaymatrix) - displaymatrix = (int32_t *)av_stream_get_side_data(is->video_st, AV_PKT_DATA_DISPLAYMATRIX, NULL); - theta = get_rotation(displaymatrix); - - if (fabs(theta - 90) < 1.0) { - INSERT_FILT("transpose", "clock"); - } else if (fabs(theta - 180) < 1.0) { - INSERT_FILT("hflip", NULL); - INSERT_FILT("vflip", NULL); - } else if (fabs(theta - 270) < 1.0) { - INSERT_FILT("transpose", "cclock"); - } else if (fabs(theta) > 1.0) { - char rotate_buf[64]; - snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta); - INSERT_FILT("rotate", rotate_buf); - } - } - - if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0) - goto fail; - - is->in_video_filter = filt_src; - is->out_video_filter = filt_out; - -fail: - return ret; -} - -static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format) -{ - static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE }; - int sample_rates[2] = { 0, -1 }; - AVFilterContext *filt_asrc = NULL, *filt_asink = NULL; - char aresample_swr_opts[512] = ""; - const AVDictionaryEntry *e = NULL; - AVBPrint bp; - 
char asrc_args[256]; - int ret; - - avfilter_graph_free(&is->agraph); - if (!(is->agraph = avfilter_graph_alloc())) - return AVERROR(ENOMEM); - is->agraph->nb_threads = filter_nbthreads; - - av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC); - - while ((e = av_dict_iterate(swr_opts, e))) - av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value); - if (strlen(aresample_swr_opts)) - aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0'; - av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0); - - av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp); - - ret = snprintf(asrc_args, sizeof(asrc_args), - "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s", - is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt), - 1, is->audio_filter_src.freq, bp.str); - - ret = avfilter_graph_create_filter(&filt_asrc, - avfilter_get_by_name("abuffer"), "ffplay_abuffer", - asrc_args, NULL, is->agraph); - if (ret < 0) - goto end; - - - ret = avfilter_graph_create_filter(&filt_asink, - avfilter_get_by_name("abuffersink"), "ffplay_abuffersink", - NULL, NULL, is->agraph); - if (ret < 0) - goto end; - - if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0) - goto end; - if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0) - goto end; - - if (force_output_format) { - sample_rates [0] = is->audio_tgt.freq; - if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0) - goto end; - if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0) - goto end; - if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0) - goto end; - } - - - if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0) - goto end; - - is->in_audio_filter = filt_asrc; - is->out_audio_filter = filt_asink; - -end: - if (ret < 0) - avfilter_graph_free(&is->agraph); - av_bprint_finalize(&bp, NULL); - - return ret; -} - -static int audio_thread(void *arg) -{ - VideoState *is = arg; - AVFrame *frame = av_frame_alloc(); - Frame *af; - int last_serial = -1; - int reconfigure; - int got_frame = 0; - AVRational tb; - int ret = 0; - - if (!frame) - return AVERROR(ENOMEM); - - do { - if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0) - goto the_end; - - if (got_frame) { - tb = (AVRational){1, frame->sample_rate}; - - reconfigure = - cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels, - frame->format, frame->ch_layout.nb_channels) || - av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) || - is->audio_filter_src.freq != frame->sample_rate || - is->auddec.pkt_serial != last_serial; - - if (reconfigure) { - char buf1[1024], buf2[1024]; - av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1)); - av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2)); - av_log(NULL, AV_LOG_DEBUG, - "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n", - is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial, - frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial); - - is->audio_filter_src.fmt = frame->format; - ret = 
av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout); - if (ret < 0) - goto the_end; - is->audio_filter_src.freq = frame->sample_rate; - last_serial = is->auddec.pkt_serial; - - if ((ret = configure_audio_filters(is, afilters, 1)) < 0) - goto the_end; - } - - if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0) - goto the_end; - - while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) { - FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL; - tb = av_buffersink_get_time_base(is->out_audio_filter); - if (!(af = frame_queue_peek_writable(&is->sampq))) - goto the_end; - - af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb); - af->pos = fd ? fd->pkt_pos : -1; - af->serial = is->auddec.pkt_serial; - af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate}); - - av_frame_move_ref(af->frame, frame); - frame_queue_push(&is->sampq); - - if (is->audioq.serial != is->auddec.pkt_serial) - break; - } - if (ret == AVERROR_EOF) - is->auddec.finished = is->auddec.pkt_serial; - } - } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF); - the_end: - avfilter_graph_free(&is->agraph); - av_frame_free(&frame); - return ret; -} - -static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg) -{ - packet_queue_start(d->queue); - d->decoder_tid = SDL_CreateThread(fn, thread_name, arg); - if (!d->decoder_tid) { - av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError()); - return AVERROR(ENOMEM); - } - return 0; -} - -static int video_thread(void *arg) -{ - VideoState *is = arg; - AVFrame *frame = av_frame_alloc(); - double pts; - double duration; - int ret; - AVRational tb = is->video_st->time_base; - AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL); - - AVFilterGraph *graph = NULL; - AVFilterContext *filt_out = NULL, *filt_in = NULL; - int last_w = 0; - int last_h = 0; - enum AVPixelFormat last_format = -2; - int last_serial = -1; - int last_vfilter_idx = 0; - - if (!frame) - return AVERROR(ENOMEM); - - for (;;) { - ret = get_video_frame(is, frame); - if (ret < 0) - goto the_end; - if (!ret) - continue; - - if ( last_w != frame->width - || last_h != frame->height - || last_format != frame->format - || last_serial != is->viddec.pkt_serial - || last_vfilter_idx != is->vfilter_idx) { - av_log(NULL, AV_LOG_DEBUG, - "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n", - last_w, last_h, - (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial, - frame->width, frame->height, - (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial); - avfilter_graph_free(&graph); - graph = avfilter_graph_alloc(); - if (!graph) { - ret = AVERROR(ENOMEM); - goto the_end; - } - graph->nb_threads = filter_nbthreads; - if ((ret = configure_video_filters(graph, is, vfilters_list ? 
vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) { - SDL_Event event; - event.type = FF_QUIT_EVENT; - event.user.data1 = is; - SDL_PushEvent(&event); - goto the_end; - } - filt_in = is->in_video_filter; - filt_out = is->out_video_filter; - last_w = frame->width; - last_h = frame->height; - last_format = frame->format; - last_serial = is->viddec.pkt_serial; - last_vfilter_idx = is->vfilter_idx; - frame_rate = av_buffersink_get_frame_rate(filt_out); - } - - ret = av_buffersrc_add_frame(filt_in, frame); - if (ret < 0) - goto the_end; - - while (ret >= 0) { - FrameData *fd; - - is->frame_last_returned_time = av_gettime_relative() / 1000000.0; - - ret = av_buffersink_get_frame_flags(filt_out, frame, 0); - if (ret < 0) { - if (ret == AVERROR_EOF) - is->viddec.finished = is->viddec.pkt_serial; - ret = 0; - break; - } - - fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL; - - is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time; - if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0) - is->frame_last_filter_delay = 0; - tb = av_buffersink_get_time_base(filt_out); - duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0); - pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb); - ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial); - av_frame_unref(frame); - if (is->videoq.serial != is->viddec.pkt_serial) - break; - } - - if (ret < 0) - goto the_end; - } - the_end: - avfilter_graph_free(&graph); - av_frame_free(&frame); - return 0; -} - -static int subtitle_thread(void *arg) -{ - VideoState *is = arg; - Frame *sp; - int got_subtitle; - double pts; - - for (;;) { - if (!(sp = frame_queue_peek_writable(&is->subpq))) - return 0; - - if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0) - break; - - pts = 0; - - if (got_subtitle && sp->sub.format == 0) { - if (sp->sub.pts != AV_NOPTS_VALUE) - pts = sp->sub.pts / (double)AV_TIME_BASE; - sp->pts = pts; - sp->serial = is->subdec.pkt_serial; - sp->width = is->subdec.avctx->width; - sp->height = is->subdec.avctx->height; - sp->uploaded = 0; - - /* now we can update the picture count */ - frame_queue_push(&is->subpq); - } else if (got_subtitle) { - avsubtitle_free(&sp->sub); - } - } - return 0; -} - -/* copy samples for viewing in editor window */ -static void update_sample_display(VideoState *is, short *samples, int samples_size) -{ - int size, len; - - size = samples_size / sizeof(short); - while (size > 0) { - len = SAMPLE_ARRAY_SIZE - is->sample_array_index; - if (len > size) - len = size; - memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short)); - samples += len; - is->sample_array_index += len; - if (is->sample_array_index >= SAMPLE_ARRAY_SIZE) - is->sample_array_index = 0; - size -= len; - } -} - -/* return the wanted number of samples to get better sync if sync_type is video - * or external master clock */ -static int synchronize_audio(VideoState *is, int nb_samples) -{ - int wanted_nb_samples = nb_samples; - - /* if not master, then we try to remove or add samples to correct the clock */ - if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) { - double diff, avg_diff; - int min_nb_samples, max_nb_samples; - - diff = get_clock(&is->audclk) - get_master_clock(is); - - if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) { - is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum; - if 
(is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) { - /* not enough measures to have a correct estimate */ - is->audio_diff_avg_count++; - } else { - /* estimate the A-V difference */ - avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef); - - if (fabs(avg_diff) >= is->audio_diff_threshold) { - wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq); - min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100)); - max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100)); - wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples); - } - av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n", - diff, avg_diff, wanted_nb_samples - nb_samples, - is->audio_clock, is->audio_diff_threshold); - } - } else { - /* too big difference : may be initial PTS errors, so - reset A-V filter */ - is->audio_diff_avg_count = 0; - is->audio_diff_cum = 0; - } - } - - return wanted_nb_samples; -} - -/** - * Decode one audio frame and return its uncompressed size. - * - * The processed audio frame is decoded, converted if required, and - * stored in is->audio_buf, with size in bytes given by the return - * value. - */ -static int audio_decode_frame(VideoState *is) -{ - int data_size, resampled_data_size; - av_unused double audio_clock0; - int wanted_nb_samples; - Frame *af; - - if (is->paused) - return -1; - - do { -#if defined(_WIN32) - while (frame_queue_nb_remaining(&is->sampq) == 0) { - if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2) - return -1; - av_usleep (1000); - } -#endif - if (!(af = frame_queue_peek_readable(&is->sampq))) - return -1; - frame_queue_next(&is->sampq); - } while (af->serial != is->audioq.serial); - - data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels, - af->frame->nb_samples, - af->frame->format, 1); - - wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples); - - if (af->frame->format != is->audio_src.fmt || - av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) || - af->frame->sample_rate != is->audio_src.freq || - (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) { - swr_free(&is->swr_ctx); - swr_alloc_set_opts2(&is->swr_ctx, - &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq, - &af->frame->ch_layout, af->frame->format, af->frame->sample_rate, - 0, NULL); - if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) { - av_log(NULL, AV_LOG_ERROR, - "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n", - af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels, - is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels); - swr_free(&is->swr_ctx); - return -1; - } - if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0) - return -1; - is->audio_src.freq = af->frame->sample_rate; - is->audio_src.fmt = af->frame->format; - } - - if (is->swr_ctx) { - const uint8_t **in = (const uint8_t **)af->frame->extended_data; - uint8_t **out = &is->audio_buf1; - int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256; - int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0); - int len2; - if (out_size < 0) { - av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n"); - return -1; - } - if 
(wanted_nb_samples != af->frame->nb_samples) { - if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate, - wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) { - av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n"); - return -1; - } - } - av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size); - if (!is->audio_buf1) - return AVERROR(ENOMEM); - len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples); - if (len2 < 0) { - av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n"); - return -1; - } - if (len2 == out_count) { - av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n"); - if (swr_init(is->swr_ctx) < 0) - swr_free(&is->swr_ctx); - } - is->audio_buf = is->audio_buf1; - resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt); - } else { - is->audio_buf = af->frame->data[0]; - resampled_data_size = data_size; - } - - audio_clock0 = is->audio_clock; - /* update the audio clock with the pts */ - if (!isnan(af->pts)) - is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate; - else - is->audio_clock = NAN; - is->audio_clock_serial = af->serial; -#ifdef DEBUG - { - static double last_clock; - printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n", - is->audio_clock - last_clock, - is->audio_clock, audio_clock0); - last_clock = is->audio_clock; - } -#endif - return resampled_data_size; -} - -/* prepare a new audio buffer */ -static void sdl_audio_callback(void *opaque, Uint8 *stream, int len) -{ - VideoState *is = opaque; - int audio_size, len1; - - audio_callback_time = av_gettime_relative(); - - while (len > 0) { - if (is->audio_buf_index >= is->audio_buf_size) { - audio_size = audio_decode_frame(is); - if (audio_size < 0) { - /* if error, just output silence */ - is->audio_buf = NULL; - is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size; - } else { - if (is->show_mode != SHOW_MODE_VIDEO) - update_sample_display(is, (int16_t *)is->audio_buf, audio_size); - is->audio_buf_size = audio_size; - } - is->audio_buf_index = 0; - } - len1 = is->audio_buf_size - is->audio_buf_index; - if (len1 > len) - len1 = len; - if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME) - memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1); - else { - memset(stream, 0, len1); - if (!is->muted && is->audio_buf) - SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume); - } - len -= len1; - stream += len1; - is->audio_buf_index += len1; - } - is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index; - /* Let's assume the audio driver that is used by SDL has two periods. 
*/ - if (!isnan(is->audio_clock)) { - set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0); - sync_clock_to_slave(&is->extclk, &is->audclk); - } -} - -static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params) -{ - SDL_AudioSpec wanted_spec, spec; - const char *env; - static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6}; - static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000}; - int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1; - int wanted_nb_channels = wanted_channel_layout->nb_channels; - - env = SDL_getenv("SDL_AUDIO_CHANNELS"); - if (env) { - wanted_nb_channels = atoi(env); - av_channel_layout_uninit(wanted_channel_layout); - av_channel_layout_default(wanted_channel_layout, wanted_nb_channels); - } - if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) { - av_channel_layout_uninit(wanted_channel_layout); - av_channel_layout_default(wanted_channel_layout, wanted_nb_channels); - } - wanted_nb_channels = wanted_channel_layout->nb_channels; - wanted_spec.channels = wanted_nb_channels; - wanted_spec.freq = wanted_sample_rate; - if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) { - av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n"); - return -1; - } - while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq) - next_sample_rate_idx--; - wanted_spec.format = AUDIO_S16SYS; - wanted_spec.silence = 0; - wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC)); - wanted_spec.callback = sdl_audio_callback; - wanted_spec.userdata = opaque; - while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) { - av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n", - wanted_spec.channels, wanted_spec.freq, SDL_GetError()); - wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)]; - if (!wanted_spec.channels) { - wanted_spec.freq = next_sample_rates[next_sample_rate_idx--]; - wanted_spec.channels = wanted_nb_channels; - if (!wanted_spec.freq) { - av_log(NULL, AV_LOG_ERROR, - "No more combinations to try, audio open failed\n"); - return -1; - } - } - av_channel_layout_default(wanted_channel_layout, wanted_spec.channels); - } - if (spec.format != AUDIO_S16SYS) { - av_log(NULL, AV_LOG_ERROR, - "SDL advised audio format %d is not supported!\n", spec.format); - return -1; - } - if (spec.channels != wanted_spec.channels) { - av_channel_layout_uninit(wanted_channel_layout); - av_channel_layout_default(wanted_channel_layout, spec.channels); - if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) { - av_log(NULL, AV_LOG_ERROR, - "SDL advised channel count %d is not supported!\n", spec.channels); - return -1; - } - } - - audio_hw_params->fmt = AV_SAMPLE_FMT_S16; - audio_hw_params->freq = spec.freq; - if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0) - return -1; - audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1); - audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1); - if 
(audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) { - av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n"); - return -1; - } - return spec.size; -} - -/* open a given stream. Return 0 if OK */ -static int stream_component_open(VideoState *is, int stream_index) -{ - AVFormatContext *ic = is->ic; - AVCodecContext *avctx; - const AVCodec *codec; - const char *forced_codec_name = NULL; - AVDictionary *opts = NULL; - const AVDictionaryEntry *t = NULL; - int sample_rate; - AVChannelLayout ch_layout = { 0 }; - int ret = 0; - int stream_lowres = lowres; - - if (stream_index < 0 || stream_index >= ic->nb_streams) - return -1; - - avctx = avcodec_alloc_context3(NULL); - if (!avctx) - return AVERROR(ENOMEM); - - ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar); - if (ret < 0) - goto fail; - avctx->pkt_timebase = ic->streams[stream_index]->time_base; - - codec = avcodec_find_decoder(avctx->codec_id); - - switch(avctx->codec_type){ - case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break; - case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break; - case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break; - } - if (forced_codec_name) - codec = avcodec_find_decoder_by_name(forced_codec_name); - if (!codec) { - if (forced_codec_name) av_log(NULL, AV_LOG_WARNING, - "No codec could be found with name '%s'\n", forced_codec_name); - else av_log(NULL, AV_LOG_WARNING, - "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id)); - ret = AVERROR(EINVAL); - goto fail; - } - - avctx->codec_id = codec->id; - if (stream_lowres > codec->max_lowres) { - av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n", - codec->max_lowres); - stream_lowres = codec->max_lowres; - } - avctx->lowres = stream_lowres; - - if (fast) - avctx->flags2 |= AV_CODEC_FLAG2_FAST; - - opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec); - if (!av_dict_get(opts, "threads", NULL, 0)) - av_dict_set(&opts, "threads", "auto", 0); - if (stream_lowres) - av_dict_set_int(&opts, "lowres", stream_lowres, 0); - - av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY); - - if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) { - goto fail; - } - if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { - av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); - ret = AVERROR_OPTION_NOT_FOUND; - goto fail; - } - - is->eof = 0; - ic->streams[stream_index]->discard = AVDISCARD_DEFAULT; - switch (avctx->codec_type) { - case AVMEDIA_TYPE_AUDIO: - { - AVFilterContext *sink; - - is->audio_filter_src.freq = avctx->sample_rate; - ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout); - if (ret < 0) - goto fail; - is->audio_filter_src.fmt = avctx->sample_fmt; - if ((ret = configure_audio_filters(is, afilters, 0)) < 0) - goto fail; - sink = is->out_audio_filter; - sample_rate = av_buffersink_get_sample_rate(sink); - ret = av_buffersink_get_ch_layout(sink, &ch_layout); - if (ret < 0) - goto fail; - } - - /* prepare audio output */ - if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0) - goto fail; - is->audio_hw_buf_size = ret; - is->audio_src = is->audio_tgt; - is->audio_buf_size = 0; - is->audio_buf_index = 0; - - /* init averaging filter */ - is->audio_diff_avg_coef = 
exp(log(0.01) / AUDIO_DIFF_AVG_NB); - is->audio_diff_avg_count = 0; - /* since we do not have a precise anough audio FIFO fullness, - we correct audio sync only if larger than this threshold */ - is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec; - - is->audio_stream = stream_index; - is->audio_st = ic->streams[stream_index]; - - if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0) - goto fail; - if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) { - is->auddec.start_pts = is->audio_st->start_time; - is->auddec.start_pts_tb = is->audio_st->time_base; - } - if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0) - goto out; - SDL_PauseAudioDevice(audio_dev, 0); - break; - case AVMEDIA_TYPE_VIDEO: - is->video_stream = stream_index; - is->video_st = ic->streams[stream_index]; - - if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0) - goto fail; - if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0) - goto out; - is->queue_attachments_req = 1; - break; - case AVMEDIA_TYPE_SUBTITLE: - is->subtitle_stream = stream_index; - is->subtitle_st = ic->streams[stream_index]; - - if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0) - goto fail; - if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0) - goto out; - break; - default: - break; - } - goto out; - -fail: - avcodec_free_context(&avctx); -out: - av_channel_layout_uninit(&ch_layout); - av_dict_free(&opts); - - return ret; -} - -static int decode_interrupt_cb(void *ctx) -{ - VideoState *is = ctx; - return is->abort_request; -} - -static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) { - return stream_id < 0 || - queue->abort_request || - (st->disposition & AV_DISPOSITION_ATTACHED_PIC) || - queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0); -} - -static int is_realtime(AVFormatContext *s) -{ - if( !strcmp(s->iformat->name, "rtp") - || !strcmp(s->iformat->name, "rtsp") - || !strcmp(s->iformat->name, "sdp") - ) - return 1; - - if(s->pb && ( !strncmp(s->url, "rtp:", 4) - || !strncmp(s->url, "udp:", 4) - ) - ) - return 1; - return 0; -} - -/* this thread gets the stream from the disk or the network */ -static int read_thread(void *arg) -{ - VideoState *is = arg; - AVFormatContext *ic = NULL; - int err, i, ret; - int st_index[AVMEDIA_TYPE_NB]; - AVPacket *pkt = NULL; - int64_t stream_start_time; - int pkt_in_play_range = 0; - const AVDictionaryEntry *t; - SDL_mutex *wait_mutex = SDL_CreateMutex(); - int scan_all_pmts_set = 0; - int64_t pkt_ts; - - if (!wait_mutex) { - av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError()); - ret = AVERROR(ENOMEM); - goto fail; - } - - memset(st_index, -1, sizeof(st_index)); - is->eof = 0; - - pkt = av_packet_alloc(); - if (!pkt) { - av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n"); - ret = AVERROR(ENOMEM); - goto fail; - } - ic = avformat_alloc_context(); - if (!ic) { - av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n"); - ret = AVERROR(ENOMEM); - goto fail; - } - ic->interrupt_callback.callback = decode_interrupt_cb; - ic->interrupt_callback.opaque = is; - if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) { - av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE); - 
scan_all_pmts_set = 1; - } - err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts); - if (err < 0) { - print_error(is->filename, err); - ret = -1; - goto fail; - } - if (scan_all_pmts_set) - av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE); - - if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) { - av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key); - ret = AVERROR_OPTION_NOT_FOUND; - goto fail; - } - is->ic = ic; - - if (genpts) - ic->flags |= AVFMT_FLAG_GENPTS; - - av_format_inject_global_side_data(ic); - - if (find_stream_info) { - AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts); - int orig_nb_streams = ic->nb_streams; - - err = avformat_find_stream_info(ic, opts); - - for (i = 0; i < orig_nb_streams; i++) - av_dict_free(&opts[i]); - av_freep(&opts); - - if (err < 0) { - av_log(NULL, AV_LOG_WARNING, - "%s: could not find codec parameters\n", is->filename); - ret = -1; - goto fail; - } - } - - if (ic->pb) - ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end - - if (seek_by_bytes < 0) - seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) && - !!(ic->iformat->flags & AVFMT_TS_DISCONT) && - strcmp("ogg", ic->iformat->name); - - is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0; - - if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0))) - window_title = av_asprintf("%s - %s", t->value, input_filename); - - /* if seeking requested, we execute it */ - if (start_time != AV_NOPTS_VALUE) { - int64_t timestamp; - - timestamp = start_time; - /* add the stream start time */ - if (ic->start_time != AV_NOPTS_VALUE) - timestamp += ic->start_time; - ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0); - if (ret < 0) { - av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n", - is->filename, (double)timestamp / AV_TIME_BASE); - } - } - - is->realtime = is_realtime(ic); - - if (show_status) - av_dump_format(ic, 0, is->filename, 0); - - for (i = 0; i < ic->nb_streams; i++) { - AVStream *st = ic->streams[i]; - enum AVMediaType type = st->codecpar->codec_type; - st->discard = AVDISCARD_ALL; - if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1) - if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0) - st_index[type] = i; - } - for (i = 0; i < AVMEDIA_TYPE_NB; i++) { - if (wanted_stream_spec[i] && st_index[i] == -1) { - av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i)); - st_index[i] = INT_MAX; - } - } - - if (!video_disable) - st_index[AVMEDIA_TYPE_VIDEO] = - av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, - st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0); - if (!audio_disable) - st_index[AVMEDIA_TYPE_AUDIO] = - av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, - st_index[AVMEDIA_TYPE_AUDIO], - st_index[AVMEDIA_TYPE_VIDEO], - NULL, 0); - if (!video_disable && !subtitle_disable) - st_index[AVMEDIA_TYPE_SUBTITLE] = - av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE, - st_index[AVMEDIA_TYPE_SUBTITLE], - (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ? 
- st_index[AVMEDIA_TYPE_AUDIO] : - st_index[AVMEDIA_TYPE_VIDEO]), - NULL, 0); - - is->show_mode = show_mode; - if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) { - AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]]; - AVCodecParameters *codecpar = st->codecpar; - AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL); - if (codecpar->width) - set_default_window_size(codecpar->width, codecpar->height, sar); - } - - /* open the streams */ - if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) { - stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]); - } - - ret = -1; - if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) { - ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]); - } - if (is->show_mode == SHOW_MODE_NONE) - is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT; - - if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) { - stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]); - } - - if (is->video_stream < 0 && is->audio_stream < 0) { - av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n", - is->filename); - ret = -1; - goto fail; - } - - if (infinite_buffer < 0 && is->realtime) - infinite_buffer = 1; - - for (;;) { - if (is->abort_request) - break; - if (is->paused != is->last_paused) { - is->last_paused = is->paused; - if (is->paused) - is->read_pause_return = av_read_pause(ic); - else - av_read_play(ic); - } -#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL - if (is->paused && - (!strcmp(ic->iformat->name, "rtsp") || - (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) { - /* wait 10 ms to avoid trying to get another packet */ - /* XXX: horrible */ - SDL_Delay(10); - continue; - } -#endif - if (is->seek_req) { - int64_t seek_target = is->seek_pos; - int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN; - int64_t seek_max = is->seek_rel < 0 ? 
seek_target - is->seek_rel - 2: INT64_MAX; -// FIXME the +-2 is due to rounding being not done in the correct direction in generation -// of the seek_pos/seek_rel variables - - ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, - "%s: error while seeking\n", is->ic->url); - } else { - if (is->audio_stream >= 0) - packet_queue_flush(&is->audioq); - if (is->subtitle_stream >= 0) - packet_queue_flush(&is->subtitleq); - if (is->video_stream >= 0) - packet_queue_flush(&is->videoq); - if (is->seek_flags & AVSEEK_FLAG_BYTE) { - set_clock(&is->extclk, NAN, 0); - } else { - set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0); - } - } - is->seek_req = 0; - is->queue_attachments_req = 1; - is->eof = 0; - if (is->paused) - step_to_next_frame(is); - } - if (is->queue_attachments_req) { - if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) { - if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0) - goto fail; - packet_queue_put(&is->videoq, pkt); - packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream); - } - is->queue_attachments_req = 0; - } - - /* if the queue are full, no need to read more */ - if (infinite_buffer<1 && - (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE - || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) && - stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) && - stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) { - /* wait 10 ms */ - SDL_LockMutex(wait_mutex); - SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); - SDL_UnlockMutex(wait_mutex); - continue; - } - if (!is->paused && - (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) && - (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) { - if (loop != 1 && (!loop || --loop)) { - stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0); - } else if (autoexit) { - ret = AVERROR_EOF; - goto fail; - } - } - ret = av_read_frame(ic, pkt); - if (ret < 0) { - if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) { - if (is->video_stream >= 0) - packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream); - if (is->audio_stream >= 0) - packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream); - if (is->subtitle_stream >= 0) - packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream); - is->eof = 1; - } - if (ic->pb && ic->pb->error) { - if (autoexit) - goto fail; - else - break; - } - SDL_LockMutex(wait_mutex); - SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10); - SDL_UnlockMutex(wait_mutex); - continue; - } else { - is->eof = 0; - } - /* check if packet is in play range specified by user, then queue, otherwise discard */ - stream_start_time = ic->streams[pkt->stream_index]->start_time; - pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts; - pkt_in_play_range = duration == AV_NOPTS_VALUE || - (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) * - av_q2d(ic->streams[pkt->stream_index]->time_base) - - (double)(start_time != AV_NOPTS_VALUE ? 
start_time : 0) / 1000000 - <= ((double)duration / 1000000); - if (pkt->stream_index == is->audio_stream && pkt_in_play_range) { - packet_queue_put(&is->audioq, pkt); - } else if (pkt->stream_index == is->video_stream && pkt_in_play_range - && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) { - packet_queue_put(&is->videoq, pkt); - } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) { - packet_queue_put(&is->subtitleq, pkt); - } else { - av_packet_unref(pkt); - } - } - - ret = 0; - fail: - if (ic && !is->ic) - avformat_close_input(&ic); - - av_packet_free(&pkt); - if (ret != 0) { - SDL_Event event; - - event.type = FF_QUIT_EVENT; - event.user.data1 = is; - SDL_PushEvent(&event); - } - SDL_DestroyMutex(wait_mutex); - return 0; -} - -static VideoState *stream_open(const char *filename, - const AVInputFormat *iformat) -{ - VideoState *is; - - is = av_mallocz(sizeof(VideoState)); - if (!is) - return NULL; - is->last_video_stream = is->video_stream = -1; - is->last_audio_stream = is->audio_stream = -1; - is->last_subtitle_stream = is->subtitle_stream = -1; - is->filename = av_strdup(filename); - if (!is->filename) - goto fail; - is->iformat = iformat; - is->ytop = 0; - is->xleft = 0; - - /* start video display */ - if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0) - goto fail; - if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0) - goto fail; - if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0) - goto fail; - - if (packet_queue_init(&is->videoq) < 0 || - packet_queue_init(&is->audioq) < 0 || - packet_queue_init(&is->subtitleq) < 0) - goto fail; - - if (!(is->continue_read_thread = SDL_CreateCond())) { - av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError()); - goto fail; - } - - init_clock(&is->vidclk, &is->videoq.serial); - init_clock(&is->audclk, &is->audioq.serial); - init_clock(&is->extclk, &is->extclk.serial); - is->audio_clock_serial = -1; - if (startup_volume < 0) - av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume); - if (startup_volume > 100) - av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume); - startup_volume = av_clip(startup_volume, 0, 100); - startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME); - is->audio_volume = startup_volume; - is->muted = 0; - is->av_sync_type = av_sync_type; - is->read_tid = SDL_CreateThread(read_thread, "read_thread", is); - if (!is->read_tid) { - av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError()); -fail: - stream_close(is); - return NULL; - } - return is; -} - -static void stream_cycle_channel(VideoState *is, int codec_type) -{ - AVFormatContext *ic = is->ic; - int start_index, stream_index; - int old_index; - AVStream *st; - AVProgram *p = NULL; - int nb_streams = is->ic->nb_streams; - - if (codec_type == AVMEDIA_TYPE_VIDEO) { - start_index = is->last_video_stream; - old_index = is->video_stream; - } else if (codec_type == AVMEDIA_TYPE_AUDIO) { - start_index = is->last_audio_stream; - old_index = is->audio_stream; - } else { - start_index = is->last_subtitle_stream; - old_index = is->subtitle_stream; - } - stream_index = start_index; - - if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) { - p = av_find_program_from_stream(ic, NULL, is->video_stream); - if (p) { - nb_streams = p->nb_stream_indexes; - for (start_index = 0; start_index < nb_streams; start_index++) - if (p->stream_index[start_index] == 
stream_index) - break; - if (start_index == nb_streams) - start_index = -1; - stream_index = start_index; - } - } - - for (;;) { - if (++stream_index >= nb_streams) - { - if (codec_type == AVMEDIA_TYPE_SUBTITLE) - { - stream_index = -1; - is->last_subtitle_stream = -1; - goto the_end; - } - if (start_index == -1) - return; - stream_index = 0; - } - if (stream_index == start_index) - return; - st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index]; - if (st->codecpar->codec_type == codec_type) { - /* check that parameters are OK */ - switch (codec_type) { - case AVMEDIA_TYPE_AUDIO: - if (st->codecpar->sample_rate != 0 && - st->codecpar->ch_layout.nb_channels != 0) - goto the_end; - break; - case AVMEDIA_TYPE_VIDEO: - case AVMEDIA_TYPE_SUBTITLE: - goto the_end; - default: - break; - } - } - } - the_end: - if (p && stream_index != -1) - stream_index = p->stream_index[stream_index]; - av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n", - av_get_media_type_string(codec_type), - old_index, - stream_index); - - stream_component_close(is, old_index); - stream_component_open(is, stream_index); -} - - -static void toggle_full_screen(VideoState *is) -{ - is_full_screen = !is_full_screen; - SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0); -} - -static void toggle_audio_display(VideoState *is) -{ - int next = is->show_mode; - do { - next = (next + 1) % SHOW_MODE_NB; - } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st)); - if (is->show_mode != next) { - is->force_refresh = 1; - is->show_mode = next; - } -} - -static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) { - double remaining_time = 0.0; - SDL_PumpEvents(); - while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) { - if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) { - SDL_ShowCursor(0); - cursor_hidden = 1; - } - if (remaining_time > 0.0) - av_usleep((int64_t)(remaining_time * 1000000.0)); - remaining_time = REFRESH_RATE; - if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh)) - video_refresh(is, &remaining_time); - SDL_PumpEvents(); - } -} - -static void seek_chapter(VideoState *is, int incr) -{ - int64_t pos = get_master_clock(is) * AV_TIME_BASE; - int i; - - if (!is->ic->nb_chapters) - return; - - /* find the current chapter */ - for (i = 0; i < is->ic->nb_chapters; i++) { - AVChapter *ch = is->ic->chapters[i]; - if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) { - i--; - break; - } - } - - i += incr; - i = FFMAX(i, 0); - if (i >= is->ic->nb_chapters) - return; - - av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i); - stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base, - AV_TIME_BASE_Q), 0, 0); -} - -/* handle an event sent by the GUI */ -static void event_loop(VideoState *cur_stream) -{ - SDL_Event event; - double incr, pos, frac; - - for (;;) { - double x; - refresh_loop_wait_event(cur_stream, &event); - switch (event.type) { - case SDL_KEYDOWN: - if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) { - do_exit(cur_stream); - break; - } - // If we don't yet have a window, skip all key events, because read_thread might still be initializing... 
- if (!cur_stream->width) - continue; - switch (event.key.keysym.sym) { - case SDLK_f: - toggle_full_screen(cur_stream); - cur_stream->force_refresh = 1; - break; - case SDLK_p: - case SDLK_SPACE: - toggle_pause(cur_stream); - break; - case SDLK_m: - toggle_mute(cur_stream); - break; - case SDLK_KP_MULTIPLY: - case SDLK_0: - update_volume(cur_stream, 1, SDL_VOLUME_STEP); - break; - case SDLK_KP_DIVIDE: - case SDLK_9: - update_volume(cur_stream, -1, SDL_VOLUME_STEP); - break; - case SDLK_s: // S: Step to next frame - step_to_next_frame(cur_stream); - break; - case SDLK_a: - stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO); - break; - case SDLK_v: - stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO); - break; - case SDLK_c: - stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO); - stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO); - stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE); - break; - case SDLK_t: - stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE); - break; - case SDLK_w: - if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) { - if (++cur_stream->vfilter_idx >= nb_vfilters) - cur_stream->vfilter_idx = 0; - } else { - cur_stream->vfilter_idx = 0; - toggle_audio_display(cur_stream); - } - break; - case SDLK_PAGEUP: - if (cur_stream->ic->nb_chapters <= 1) { - incr = 600.0; - goto do_seek; - } - seek_chapter(cur_stream, 1); - break; - case SDLK_PAGEDOWN: - if (cur_stream->ic->nb_chapters <= 1) { - incr = -600.0; - goto do_seek; - } - seek_chapter(cur_stream, -1); - break; - case SDLK_LEFT: - incr = seek_interval ? -seek_interval : -10.0; - goto do_seek; - case SDLK_RIGHT: - incr = seek_interval ? seek_interval : 10.0; - goto do_seek; - case SDLK_UP: - incr = 60.0; - goto do_seek; - case SDLK_DOWN: - incr = -60.0; - do_seek: - if (seek_by_bytes) { - pos = -1; - if (pos < 0 && cur_stream->video_stream >= 0) - pos = frame_queue_last_pos(&cur_stream->pictq); - if (pos < 0 && cur_stream->audio_stream >= 0) - pos = frame_queue_last_pos(&cur_stream->sampq); - if (pos < 0) - pos = avio_tell(cur_stream->ic->pb); - if (cur_stream->ic->bit_rate) - incr *= cur_stream->ic->bit_rate / 8.0; - else - incr *= 180000.0; - pos += incr; - stream_seek(cur_stream, pos, incr, 1); - } else { - pos = get_master_clock(cur_stream); - if (isnan(pos)) - pos = (double)cur_stream->seek_pos / AV_TIME_BASE; - pos += incr; - if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE) - pos = cur_stream->ic->start_time / (double)AV_TIME_BASE; - stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0); - } - break; - default: - break; - } - break; - case SDL_MOUSEBUTTONDOWN: - if (exit_on_mousedown) { - do_exit(cur_stream); - break; - } - if (event.button.button == SDL_BUTTON_LEFT) { - static int64_t last_mouse_left_click = 0; - if (av_gettime_relative() - last_mouse_left_click <= 500000) { - toggle_full_screen(cur_stream); - cur_stream->force_refresh = 1; - last_mouse_left_click = 0; - } else { - last_mouse_left_click = av_gettime_relative(); - } - } - case SDL_MOUSEMOTION: - if (cursor_hidden) { - SDL_ShowCursor(1); - cursor_hidden = 0; - } - cursor_last_shown = av_gettime_relative(); - if (event.type == SDL_MOUSEBUTTONDOWN) { - if (event.button.button != SDL_BUTTON_RIGHT) - break; - x = event.button.x; - } else { - if (!(event.motion.state & SDL_BUTTON_RMASK)) - break; - x = event.motion.x; - } - if (seek_by_bytes || cur_stream->ic->duration <= 0) { - uint64_t size = 
avio_size(cur_stream->ic->pb); - stream_seek(cur_stream, size*x/cur_stream->width, 0, 1); - } else { - int64_t ts; - int ns, hh, mm, ss; - int tns, thh, tmm, tss; - tns = cur_stream->ic->duration / 1000000LL; - thh = tns / 3600; - tmm = (tns % 3600) / 60; - tss = (tns % 60); - frac = x / cur_stream->width; - ns = frac * tns; - hh = ns / 3600; - mm = (ns % 3600) / 60; - ss = (ns % 60); - av_log(NULL, AV_LOG_INFO, - "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100, - hh, mm, ss, thh, tmm, tss); - ts = frac * cur_stream->ic->duration; - if (cur_stream->ic->start_time != AV_NOPTS_VALUE) - ts += cur_stream->ic->start_time; - stream_seek(cur_stream, ts, 0, 0); - } - break; - case SDL_WINDOWEVENT: - switch (event.window.event) { - case SDL_WINDOWEVENT_SIZE_CHANGED: - screen_width = cur_stream->width = event.window.data1; - screen_height = cur_stream->height = event.window.data2; - if (cur_stream->vis_texture) { - SDL_DestroyTexture(cur_stream->vis_texture); - cur_stream->vis_texture = NULL; - } - case SDL_WINDOWEVENT_EXPOSED: - cur_stream->force_refresh = 1; - } - break; - case SDL_QUIT: - case FF_QUIT_EVENT: - do_exit(cur_stream); - break; - default: - break; - } - } -} - -static int opt_width(void *optctx, const char *opt, const char *arg) -{ - screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX); - return 0; -} - -static int opt_height(void *optctx, const char *opt, const char *arg) -{ - screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX); - return 0; -} - -static int opt_format(void *optctx, const char *opt, const char *arg) -{ - file_iformat = av_find_input_format(arg); - if (!file_iformat) { - av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg); - return AVERROR(EINVAL); - } - return 0; -} - -static int opt_sync(void *optctx, const char *opt, const char *arg) -{ - if (!strcmp(arg, "audio")) - av_sync_type = AV_SYNC_AUDIO_MASTER; - else if (!strcmp(arg, "video")) - av_sync_type = AV_SYNC_VIDEO_MASTER; - else if (!strcmp(arg, "ext")) - av_sync_type = AV_SYNC_EXTERNAL_CLOCK; - else { - av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg); - exit(1); - } - return 0; -} - -static int opt_seek(void *optctx, const char *opt, const char *arg) -{ - start_time = parse_time_or_die(opt, arg, 1); - return 0; -} - -static int opt_duration(void *optctx, const char *opt, const char *arg) -{ - duration = parse_time_or_die(opt, arg, 1); - return 0; -} - -static int opt_show_mode(void *optctx, const char *opt, const char *arg) -{ - show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO : - !strcmp(arg, "waves") ? SHOW_MODE_WAVES : - !strcmp(arg, "rdft" ) ? 
SHOW_MODE_RDFT : - parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1); - return 0; -} - -static void opt_input_file(void *optctx, const char *filename) -{ - if (input_filename) { - av_log(NULL, AV_LOG_FATAL, - "Argument '%s' provided as input filename, but '%s' was already specified.\n", - filename, input_filename); - exit(1); - } - if (!strcmp(filename, "-")) - filename = "fd:"; - input_filename = filename; -} - -static int opt_codec(void *optctx, const char *opt, const char *arg) -{ - const char *spec = strchr(opt, ':'); - if (!spec) { - av_log(NULL, AV_LOG_ERROR, - "No media specifier was specified in '%s' in option '%s'\n", - arg, opt); - return AVERROR(EINVAL); - } - spec++; - switch (spec[0]) { - case 'a' : audio_codec_name = arg; break; - case 's' : subtitle_codec_name = arg; break; - case 'v' : video_codec_name = arg; break; - default: - av_log(NULL, AV_LOG_ERROR, - "Invalid media specifier '%s' in option '%s'\n", spec, opt); - return AVERROR(EINVAL); - } - return 0; -} - -static int dummy; - -static const OptionDef options[] = { - CMDUTILS_COMMON_OPTIONS - { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" }, - { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" }, - { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" }, - { "an", OPT_BOOL, { &audio_disable }, "disable audio" }, - { "vn", OPT_BOOL, { &video_disable }, "disable video" }, - { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" }, - { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" }, - { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" }, - { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" }, - { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" }, - { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" }, - { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" }, - { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" }, - { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" }, - { "noborder", OPT_BOOL, { &borderless }, "borderless window" }, - { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" }, - { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" }, - { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" }, - { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" }, - { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" }, - { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" }, - { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""}, - { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" }, - { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. 
type (type=audio/video/ext)", "type" }, - { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" }, - { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" }, - { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" }, - { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" }, - { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" }, - { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" }, - { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" }, - { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" }, - { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" }, - { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" }, - { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" }, - { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" }, - { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" }, - { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"}, - { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" }, - { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" }, - { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" }, - { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" }, - { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" }, - { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info }, - "read and decode the streams to fill missing information with heuristics" }, - { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" }, - { NULL, }, -}; - -static void show_usage(void) -{ - av_log(NULL, AV_LOG_INFO, "Simple media player\n"); - av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name); - av_log(NULL, AV_LOG_INFO, "\n"); -} - -void show_help_default(const char *opt, const char *arg) -{ - av_log_set_callback(log_callback_help); - show_usage(); - show_help_options(options, "Main options:", 0, OPT_EXPERT, 0); - show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0); - printf("\n"); - show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM); - show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM); - show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM); - printf("\nWhile playing:\n" - "q, ESC quit\n" - "f toggle full screen\n" - "p, SPC pause\n" - "m toggle mute\n" - "9, 0 decrease and increase volume respectively\n" - "/, * decrease and increase volume respectively\n" - "a cycle audio channel in the current program\n" - "v cycle video channel\n" - "t cycle subtitle channel in the current program\n" - "c cycle program\n" - "w cycle video filters or show modes\n" - "s activate frame-step mode\n" - "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n" - "down/up seek 
backward/forward 1 minute\n" - "page down/page up seek backward/forward 10 minutes\n" - "right mouse click seek to percentage in file corresponding to fraction of width\n" - "left double-click toggle full screen\n" - ); -} - -/* Called from the main */ -int main(int argc, char **argv) -{ - int flags; - VideoState *is; - - init_dynload(); - - av_log_set_flags(AV_LOG_SKIP_REPEATED); - parse_loglevel(argc, argv, options); - - /* register all codecs, demux and protocols */ -#if CONFIG_AVDEVICE - avdevice_register_all(); -#endif - avformat_network_init(); - - signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ - signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */ - - show_banner(argc, argv, options); - - parse_options(NULL, argc, argv, options, opt_input_file); - - if (!input_filename) { - show_usage(); - av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n"); - av_log(NULL, AV_LOG_FATAL, - "Use -h to get full help or, even better, run 'man %s'\n", program_name); - exit(1); - } - - if (display_disable) { - video_disable = 1; - } - flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER; - if (audio_disable) - flags &= ~SDL_INIT_AUDIO; - else { - /* Try to work around an occasional ALSA buffer underflow issue when the - * period size is NPOT due to ALSA resampling by forcing the buffer size. */ - if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE")) - SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1); - } - if (display_disable) - flags &= ~SDL_INIT_VIDEO; - if (SDL_Init (flags)) { - av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError()); - av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n"); - exit(1); - } - - SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE); - SDL_EventState(SDL_USEREVENT, SDL_IGNORE); - - if (!display_disable) { - int flags = SDL_WINDOW_HIDDEN; - if (alwaysontop) -#if SDL_VERSION_ATLEAST(2,0,5) - flags |= SDL_WINDOW_ALWAYS_ON_TOP; -#else - av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. 
Feature will be inactive.\n"); -#endif - if (borderless) - flags |= SDL_WINDOW_BORDERLESS; - else - flags |= SDL_WINDOW_RESIZABLE; - -#ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR - SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0"); -#endif - window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags); - SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear"); - if (window) { - renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC); - if (!renderer) { - av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError()); - renderer = SDL_CreateRenderer(window, -1, 0); - } - if (renderer) { - if (!SDL_GetRendererInfo(renderer, &renderer_info)) - av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name); - } - } - if (!window || !renderer || !renderer_info.num_texture_formats) { - av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError()); - do_exit(NULL); - } - } - - is = stream_open(input_filename, file_iformat); - if (!is) { - av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n"); - do_exit(NULL); - } - - event_loop(is); - - /* never returns */ - - return 0; -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdsubdec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdsubdec.c deleted file mode 100644 index a5da0d7b0862478db8946cdbdffd13e75c50ce1b..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdsubdec.c +++ /dev/null @@ -1,719 +0,0 @@ -/* - * DVD subtitle decoding - * Copyright (c) 2005 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "avcodec.h" -#include "codec_internal.h" -#include "decode.h" -#include "dvdsub.h" -#include "get_bits.h" - -#include "libavutil/attributes.h" -#include "libavutil/colorspace.h" -#include "libavutil/file_open.h" -#include "libavutil/opt.h" -#include "libavutil/bswap.h" - -typedef struct DVDSubContext -{ - AVClass *class; - uint32_t palette[16]; - char *palette_str; - char *ifo_str; - int has_palette; - uint8_t colormap[4]; - uint8_t alpha[256]; - uint8_t buf[0x10000]; - int buf_size; - int forced_subs_only; - uint8_t used_color[256]; -} DVDSubContext; - -static void yuv_a_to_rgba(const uint8_t *ycbcr, const uint8_t *alpha, uint32_t *rgba, int num_values) -{ - const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; - uint8_t r, g, b; - int i, y, cb, cr; - int r_add, g_add, b_add; - - for (i = num_values; i > 0; i--) { - y = *ycbcr++; - cr = *ycbcr++; - cb = *ycbcr++; - YUV_TO_RGB1_CCIR(cb, cr); - YUV_TO_RGB2_CCIR(r, g, b, y); - *rgba++ = ((unsigned)*alpha++ << 24) | (r << 16) | (g << 8) | b; - } -} - -static int decode_run_2bit(GetBitContext *gb, int *color) -{ - unsigned int v, t; - - v = 0; - for (t = 1; v < t && t <= 0x40; t <<= 2) - v = (v << 4) | get_bits(gb, 4); - *color = v & 3; - if (v < 4) { /* Code for fill rest of line */ - return INT_MAX; - } - return v >> 2; -} - -static int decode_run_8bit(GetBitContext *gb, int *color) -{ - int len; - int has_run = get_bits1(gb); - *color = get_bits(gb, 2 + 6*get_bits1(gb)); - if (has_run) { - if (get_bits1(gb)) { - len = get_bits(gb, 7); - if (len == 0) - len = INT_MAX; - else - len += 9; - } else - len = get_bits(gb, 3) + 2; - } else - len = 1; - return len; -} - -static int decode_rle(uint8_t *bitmap, int linesize, int w, int h, uint8_t used_color[256], - const uint8_t *buf, int start, int buf_size, int is_8bit) -{ - GetBitContext gb; - int bit_len; - int x, y, len, color; - uint8_t *d; - - if (start >= buf_size) - return -1; - - if (w <= 0 || h <= 0) - return -1; - - bit_len = (buf_size - start) * 8; - init_get_bits(&gb, buf + start, bit_len); - - x = 0; - y = 0; - d = bitmap; - for(;;) { - if (get_bits_count(&gb) > bit_len) - return -1; - if (is_8bit) - len = decode_run_8bit(&gb, &color); - else - len = decode_run_2bit(&gb, &color); - if (len != INT_MAX && len > w - x) - return AVERROR_INVALIDDATA; - len = FFMIN(len, w - x); - memset(d + x, color, len); - used_color[color] = 1; - x += len; - if (x >= w) { - y++; - if (y >= h) - break; - d += linesize; - x = 0; - /* byte align */ - align_get_bits(&gb); - } - } - return 0; -} - -static void guess_palette(DVDSubContext* ctx, - uint32_t *rgba_palette, - uint32_t subtitle_color) -{ - static const uint8_t level_map[4][4] = { - // this configuration (full range, lowest to highest) in tests - // seemed most common, so assume this - {0xff}, - {0x00, 0xff}, - {0x00, 0x80, 0xff}, - {0x00, 0x55, 0xaa, 0xff}, - }; - uint8_t color_used[16] = { 0 }; - int nb_opaque_colors, i, level, j, r, g, b; - uint8_t *colormap = ctx->colormap, *alpha = ctx->alpha; - - if(ctx->has_palette) { - for(i = 0; i < 4; i++) - rgba_palette[i] = (ctx->palette[colormap[i]] & 0x00ffffff) - | ((alpha[i] * 17U) << 24); - return; - } - - for(i = 0; i < 4; i++) - rgba_palette[i] = 0; - - nb_opaque_colors = 0; - for(i = 0; i < 4; i++) { - if (alpha[i] != 0 && !color_used[colormap[i]]) { - 
color_used[colormap[i]] = 1; - nb_opaque_colors++; - } - } - - if (nb_opaque_colors == 0) - return; - - j = 0; - memset(color_used, 0, 16); - for(i = 0; i < 4; i++) { - if (alpha[i] != 0) { - if (!color_used[colormap[i]]) { - level = level_map[nb_opaque_colors - 1][j]; - r = (((subtitle_color >> 16) & 0xff) * level) >> 8; - g = (((subtitle_color >> 8) & 0xff) * level) >> 8; - b = (((subtitle_color >> 0) & 0xff) * level) >> 8; - rgba_palette[i] = b | (g << 8) | (r << 16) | ((alpha[i] * 17U) << 24); - color_used[colormap[i]] = (i + 1); - j++; - } else { - rgba_palette[i] = (rgba_palette[color_used[colormap[i]] - 1] & 0x00ffffff) | - ((alpha[i] * 17U) << 24); - } - } - } -} - -static void reset_rects(AVSubtitle *sub_header) -{ - int i; - - if (sub_header->rects) { - for (i = 0; i < sub_header->num_rects; i++) { - av_freep(&sub_header->rects[i]->data[0]); - av_freep(&sub_header->rects[i]->data[1]); - av_freep(&sub_header->rects[i]); - } - av_freep(&sub_header->rects); - sub_header->num_rects = 0; - } -} - -#define READ_OFFSET(a) (big_offsets ? AV_RB32(a) : AV_RB16(a)) - -static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header, - const uint8_t *buf, int buf_size) -{ - int cmd_pos, pos, cmd, x1, y1, x2, y2, next_cmd_pos; - int big_offsets, offset_size, is_8bit = 0; - const uint8_t *yuv_palette = NULL; - uint8_t *colormap = ctx->colormap, *alpha = ctx->alpha; - int date; - int i; - int is_menu = 0; - uint32_t size; - int64_t offset1, offset2; - - if (buf_size < 10) - return -1; - - if (AV_RB16(buf) == 0) { /* HD subpicture with 4-byte offsets */ - big_offsets = 1; - offset_size = 4; - cmd_pos = 6; - } else { - big_offsets = 0; - offset_size = 2; - cmd_pos = 2; - } - - size = READ_OFFSET(buf + (big_offsets ? 2 : 0)); - cmd_pos = READ_OFFSET(buf + cmd_pos); - - if (cmd_pos < 0 || cmd_pos > buf_size - 2 - offset_size) { - if (cmd_pos > size) { - av_log(ctx, AV_LOG_ERROR, "Discarding invalid packet\n"); - return 0; - } - return AVERROR(EAGAIN); - } - - while (cmd_pos > 0 && cmd_pos < buf_size - 2 - offset_size) { - date = AV_RB16(buf + cmd_pos); - next_cmd_pos = READ_OFFSET(buf + cmd_pos + 2); - ff_dlog(NULL, "cmd_pos=0x%04x next=0x%04x date=%d\n", - cmd_pos, next_cmd_pos, date); - pos = cmd_pos + 2 + offset_size; - offset1 = -1; - offset2 = -1; - x1 = y1 = x2 = y2 = 0; - while (pos < buf_size) { - cmd = buf[pos++]; - ff_dlog(NULL, "cmd=%02x\n", cmd); - switch(cmd) { - case 0x00: - /* menu subpicture */ - is_menu = 1; - break; - case 0x01: - /* set start date */ - sub_header->start_display_time = (date << 10) / 90; - break; - case 0x02: - /* set end date */ - sub_header->end_display_time = (date << 10) / 90; - break; - case 0x03: - /* set colormap */ - if ((buf_size - pos) < 2) - goto fail; - colormap[3] = buf[pos] >> 4; - colormap[2] = buf[pos] & 0x0f; - colormap[1] = buf[pos + 1] >> 4; - colormap[0] = buf[pos + 1] & 0x0f; - pos += 2; - break; - case 0x04: - /* set alpha */ - if ((buf_size - pos) < 2) - goto fail; - alpha[3] = buf[pos] >> 4; - alpha[2] = buf[pos] & 0x0f; - alpha[1] = buf[pos + 1] >> 4; - alpha[0] = buf[pos + 1] & 0x0f; - pos += 2; - ff_dlog(NULL, "alpha=%x%x%x%x\n", alpha[0],alpha[1],alpha[2],alpha[3]); - break; - case 0x05: - case 0x85: - if ((buf_size - pos) < 6) - goto fail; - x1 = (buf[pos] << 4) | (buf[pos + 1] >> 4); - x2 = ((buf[pos + 1] & 0x0f) << 8) | buf[pos + 2]; - y1 = (buf[pos + 3] << 4) | (buf[pos + 4] >> 4); - y2 = ((buf[pos + 4] & 0x0f) << 8) | buf[pos + 5]; - if (cmd & 0x80) - is_8bit = 1; - ff_dlog(NULL, "x1=%d x2=%d y1=%d y2=%d\n", x1, x2, y1, 
y2); - pos += 6; - break; - case 0x06: - if ((buf_size - pos) < 4) - goto fail; - offset1 = AV_RB16(buf + pos); - offset2 = AV_RB16(buf + pos + 2); - ff_dlog(NULL, "offset1=0x%04"PRIx64" offset2=0x%04"PRIx64"\n", offset1, offset2); - pos += 4; - break; - case 0x86: - if ((buf_size - pos) < 8) - goto fail; - offset1 = AV_RB32(buf + pos); - offset2 = AV_RB32(buf + pos + 4); - ff_dlog(NULL, "offset1=0x%04"PRIx64" offset2=0x%04"PRIx64"\n", offset1, offset2); - pos += 8; - break; - - case 0x83: - /* HD set palette */ - if ((buf_size - pos) < 768) - goto fail; - yuv_palette = buf + pos; - pos += 768; - break; - case 0x84: - /* HD set contrast (alpha) */ - if ((buf_size - pos) < 256) - goto fail; - for (i = 0; i < 256; i++) - alpha[i] = 0xFF - buf[pos+i]; - pos += 256; - break; - - case 0xff: - goto the_end; - default: - ff_dlog(NULL, "unrecognised subpicture command 0x%x\n", cmd); - goto the_end; - } - } - the_end: - if (offset1 >= buf_size || offset2 >= buf_size) - goto fail; - - if (offset1 >= 0 && offset2 >= 0) { - int w, h; - uint8_t *bitmap; - - /* decode the bitmap */ - w = x2 - x1 + 1; - if (w < 0) - w = 0; - h = y2 - y1 + 1; - if (h < 0) - h = 0; - if (w > 0 && h > 1) { - reset_rects(sub_header); - memset(ctx->used_color, 0, sizeof(ctx->used_color)); - sub_header->rects = av_mallocz(sizeof(*sub_header->rects)); - if (!sub_header->rects) - goto fail; - sub_header->rects[0] = av_mallocz(sizeof(AVSubtitleRect)); - if (!sub_header->rects[0]) - goto fail; - sub_header->num_rects = 1; - bitmap = sub_header->rects[0]->data[0] = av_malloc(w * h); - if (!bitmap) - goto fail; - if (decode_rle(bitmap, w * 2, w, (h + 1) / 2, ctx->used_color, - buf, offset1, buf_size, is_8bit) < 0) - goto fail; - if (decode_rle(bitmap + w, w * 2, w, h / 2, ctx->used_color, - buf, offset2, buf_size, is_8bit) < 0) - goto fail; - sub_header->rects[0]->data[1] = av_mallocz(AVPALETTE_SIZE); - if (!sub_header->rects[0]->data[1]) - goto fail; - if (is_8bit) { - if (!yuv_palette) - goto fail; - sub_header->rects[0]->nb_colors = 256; - yuv_a_to_rgba(yuv_palette, alpha, - (uint32_t *)sub_header->rects[0]->data[1], - 256); - } else { - sub_header->rects[0]->nb_colors = 4; - guess_palette(ctx, (uint32_t*)sub_header->rects[0]->data[1], - 0xffffff); - } - sub_header->rects[0]->x = x1; - sub_header->rects[0]->y = y1; - sub_header->rects[0]->w = w; - sub_header->rects[0]->h = h; - sub_header->rects[0]->type = SUBTITLE_BITMAP; - sub_header->rects[0]->linesize[0] = w; - sub_header->rects[0]->flags = is_menu ? 
AV_SUBTITLE_FLAG_FORCED : 0; - } - } - if (next_cmd_pos < cmd_pos) { - av_log(ctx, AV_LOG_ERROR, "Invalid command offset\n"); - break; - } - if (next_cmd_pos == cmd_pos) - break; - cmd_pos = next_cmd_pos; - } - if (sub_header->num_rects > 0) - return is_menu; - fail: - reset_rects(sub_header); - return -1; -} - -static int is_transp(const uint8_t *buf, int pitch, int n, - const uint8_t *transp_color) -{ - int i; - for(i = 0; i < n; i++) { - if (!transp_color[*buf]) - return 0; - buf += pitch; - } - return 1; -} - -/* return 0 if empty rectangle, 1 if non empty */ -static int find_smallest_bounding_rectangle(DVDSubContext *ctx, AVSubtitle *s) -{ - uint8_t transp_color[256] = { 0 }; - int y1, y2, x1, x2, y, w, h, i; - uint8_t *bitmap; - int transparent = 1; - - if (s->num_rects == 0 || !s->rects || s->rects[0]->w <= 0 || s->rects[0]->h <= 0) - return 0; - - for(i = 0; i < s->rects[0]->nb_colors; i++) { - if ((((uint32_t *)s->rects[0]->data[1])[i] >> 24) == 0) { - transp_color[i] = 1; - } else if (ctx->used_color[i]) - transparent = 0; - } - if (transparent) - return 0; - y1 = 0; - while (y1 < s->rects[0]->h && is_transp(s->rects[0]->data[0] + y1 * s->rects[0]->linesize[0], - 1, s->rects[0]->w, transp_color)) - y1++; - if (y1 == s->rects[0]->h) { - av_freep(&s->rects[0]->data[0]); - s->rects[0]->w = s->rects[0]->h = 0; - return 0; - } - - y2 = s->rects[0]->h - 1; - while (y2 > 0 && is_transp(s->rects[0]->data[0] + y2 * s->rects[0]->linesize[0], 1, - s->rects[0]->w, transp_color)) - y2--; - x1 = 0; - while (x1 < (s->rects[0]->w - 1) && is_transp(s->rects[0]->data[0] + x1, s->rects[0]->linesize[0], - s->rects[0]->h, transp_color)) - x1++; - x2 = s->rects[0]->w - 1; - while (x2 > 0 && is_transp(s->rects[0]->data[0] + x2, s->rects[0]->linesize[0], s->rects[0]->h, - transp_color)) - x2--; - w = x2 - x1 + 1; - h = y2 - y1 + 1; - bitmap = av_malloc(w * h); - if (!bitmap) - return 1; - for(y = 0; y < h; y++) { - memcpy(bitmap + w * y, s->rects[0]->data[0] + x1 + (y1 + y) * s->rects[0]->linesize[0], w); - } - av_freep(&s->rects[0]->data[0]); - s->rects[0]->data[0] = bitmap; - s->rects[0]->linesize[0] = w; - s->rects[0]->w = w; - s->rects[0]->h = h; - s->rects[0]->x += x1; - s->rects[0]->y += y1; - - return 1; -} - -static int append_to_cached_buf(AVCodecContext *avctx, - const uint8_t *buf, int buf_size) -{ - DVDSubContext *ctx = avctx->priv_data; - - av_assert0(buf_size >= 0 && ctx->buf_size <= sizeof(ctx->buf)); - if (buf_size >= sizeof(ctx->buf) - ctx->buf_size) { - av_log(avctx, AV_LOG_WARNING, "Attempt to reconstruct " - "too large SPU packets aborted.\n"); - ctx->buf_size = 0; - return AVERROR_INVALIDDATA; - } - memcpy(ctx->buf + ctx->buf_size, buf, buf_size); - ctx->buf_size += buf_size; - return 0; -} - -static int dvdsub_decode(AVCodecContext *avctx, AVSubtitle *sub, - int *data_size, const AVPacket *avpkt) -{ - DVDSubContext *ctx = avctx->priv_data; - const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; - int appended = 0; - int is_menu; - - if (ctx->buf_size) { - int ret = append_to_cached_buf(avctx, buf, buf_size); - if (ret < 0) { - *data_size = 0; - return ret; - } - buf = ctx->buf; - buf_size = ctx->buf_size; - appended = 1; - } - - is_menu = decode_dvd_subtitles(ctx, sub, buf, buf_size); - if (is_menu == AVERROR(EAGAIN)) { - *data_size = 0; - return appended ? 
0 : append_to_cached_buf(avctx, buf, buf_size); - } - - if (is_menu < 0) { - ctx->buf_size = 0; - no_subtitle: - reset_rects(sub); - *data_size = 0; - - return buf_size; - } - if (!is_menu && find_smallest_bounding_rectangle(ctx, sub) == 0) - goto no_subtitle; - - if (ctx->forced_subs_only && !(sub->rects[0]->flags & AV_SUBTITLE_FLAG_FORCED)) - goto no_subtitle; - - ctx->buf_size = 0; - *data_size = 1; - return buf_size; -} - -static int parse_ifo_palette(DVDSubContext *ctx, char *p) -{ - FILE *ifo; - char ifostr[12]; - uint32_t sp_pgci, pgci, off_pgc, pgc; - uint8_t r, g, b, yuv[65], *buf; - int i, y, cb, cr, r_add, g_add, b_add; - int ret = 0; - const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP; - - ctx->has_palette = 0; - if ((ifo = avpriv_fopen_utf8(p, "r")) == NULL) { - av_log(ctx, AV_LOG_WARNING, "Unable to open IFO file \"%s\": %s\n", p, av_err2str(AVERROR(errno))); - return AVERROR_EOF; - } - if (fread(ifostr, 12, 1, ifo) != 1 || memcmp(ifostr, "DVDVIDEO-VTS", 12)) { - av_log(ctx, AV_LOG_WARNING, "\"%s\" is not a proper IFO file\n", p); - ret = AVERROR_INVALIDDATA; - goto end; - } - if (fseek(ifo, 0xCC, SEEK_SET) == -1) { - ret = AVERROR(errno); - goto end; - } - if (fread(&sp_pgci, 4, 1, ifo) == 1) { - pgci = av_be2ne32(sp_pgci) * 2048; - if (fseek(ifo, pgci + 0x0C, SEEK_SET) == -1) { - ret = AVERROR(errno); - goto end; - } - if (fread(&off_pgc, 4, 1, ifo) == 1) { - pgc = pgci + av_be2ne32(off_pgc); - if (fseek(ifo, pgc + 0xA4, SEEK_SET) == -1) { - ret = AVERROR(errno); - goto end; - } - if (fread(yuv, 64, 1, ifo) == 1) { - buf = yuv; - for(i=0; i<16; i++) { - y = *++buf; - cr = *++buf; - cb = *++buf; - YUV_TO_RGB1_CCIR(cb, cr); - YUV_TO_RGB2_CCIR(r, g, b, y); - ctx->palette[i] = (r << 16) + (g << 8) + b; - buf++; - } - ctx->has_palette = 1; - } - } - } - if (ctx->has_palette == 0) { - av_log(ctx, AV_LOG_WARNING, "Failed to read palette from IFO file \"%s\"\n", p); - ret = AVERROR_INVALIDDATA; - } -end: - fclose(ifo); - return ret; -} - -static int dvdsub_parse_extradata(AVCodecContext *avctx) -{ - DVDSubContext *ctx = (DVDSubContext*) avctx->priv_data; - char *dataorig, *data; - int ret = 1; - - if (!avctx->extradata || !avctx->extradata_size) - return 1; - - dataorig = data = av_malloc(avctx->extradata_size+1); - if (!data) - return AVERROR(ENOMEM); - memcpy(data, avctx->extradata, avctx->extradata_size); - data[avctx->extradata_size] = '\0'; - - for(;;) { - int pos = strcspn(data, "\n\r"); - if (pos==0 && *data==0) - break; - - if (strncmp("palette:", data, 8) == 0) { - ctx->has_palette = 1; - ff_dvdsub_parse_palette(ctx->palette, data + 8); - } else if (strncmp("size:", data, 5) == 0) { - int w, h; - if (sscanf(data + 5, "%dx%d", &w, &h) == 2) { - ret = ff_set_dimensions(avctx, w, h); - if (ret < 0) - goto fail; - } - } - - data += pos; - data += strspn(data, "\n\r"); - } - -fail: - av_free(dataorig); - return ret; -} - -static av_cold int dvdsub_init(AVCodecContext *avctx) -{ - DVDSubContext *ctx = avctx->priv_data; - int ret; - - if ((ret = dvdsub_parse_extradata(avctx)) < 0) - return ret; - - if (ctx->ifo_str) - parse_ifo_palette(ctx, ctx->ifo_str); - if (ctx->palette_str) { - ctx->has_palette = 1; - ff_dvdsub_parse_palette(ctx->palette, ctx->palette_str); - } - if (ctx->has_palette) { - int i; - av_log(avctx, AV_LOG_DEBUG, "palette:"); - for(i=0;i<16;i++) - av_log(avctx, AV_LOG_DEBUG, " 0x%06"PRIx32, ctx->palette[i]); - av_log(avctx, AV_LOG_DEBUG, "\n"); - } - - return 1; -} - -static void dvdsub_flush(AVCodecContext *avctx) -{ - DVDSubContext *ctx = avctx->priv_data; - 
ctx->buf_size = 0; -} - -#define OFFSET(field) offsetof(DVDSubContext, field) -#define SD AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM -static const AVOption options[] = { - { "palette", "set the global palette", OFFSET(palette_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, SD }, - { "ifo_palette", "obtain the global palette from .IFO file", OFFSET(ifo_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, SD }, - { "forced_subs_only", "Only show forced subtitles", OFFSET(forced_subs_only), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, SD}, - { NULL } -}; -static const AVClass dvdsub_class = { - .class_name = "dvdsubdec", - .item_name = av_default_item_name, - .option = options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFCodec ff_dvdsub_decoder = { - .p.name = "dvdsub", - CODEC_LONG_NAME("DVD subtitles"), - .p.type = AVMEDIA_TYPE_SUBTITLE, - .p.id = AV_CODEC_ID_DVD_SUBTITLE, - .priv_data_size = sizeof(DVDSubContext), - .init = dvdsub_init, - FF_CODEC_DECODE_SUB_CB(dvdsub_decode), - .flush = dvdsub_flush, - .p.priv_class = &dvdsub_class, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dynamic_hdr_vivid.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dynamic_hdr_vivid.c deleted file mode 100644 index a9b69107982ae3f3fad3ccdc56feaa3026f61e89..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dynamic_hdr_vivid.c +++ /dev/null @@ -1,148 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "dynamic_hdr_vivid.h" -#include "get_bits.h" - -static const int32_t maxrgb_den = 4095; -static const int32_t color_saturation_gain_den = 128; -static const int32_t maximum_luminance_den = 4095; -static const int32_t base_param_m_p_den = 16383; -static const int32_t base_param_m_m_den = 10; -static const int32_t base_param_m_a_den = 1023; -static const int32_t base_param_m_b_den = 1023; -static const int32_t base_param_m_n_den = 10; -static const int32_t base_param_Delta_den = 127; - -int ff_parse_itu_t_t35_to_dynamic_hdr_vivid(AVDynamicHDRVivid *s, const uint8_t *data, - int size) -{ - GetBitContext gbc, *gb = &gbc; - int ret; - - if (!s) - return AVERROR(ENOMEM); - - ret = init_get_bits8(gb, data, size); - if (ret < 0) - return ret; - - if (get_bits_left(gb) < 8) - return AVERROR_INVALIDDATA; - - s->system_start_code = get_bits(gb, 8); - // T/UWA 005.1-2022, table 11 - if (s->system_start_code >= 0x01 && s->system_start_code <= 0x07) { - s->num_windows = 1; - - if (get_bits_left(gb) < 12 * 4 * s->num_windows) - return AVERROR_INVALIDDATA; - for (int w = 0; w < s->num_windows; w++) { - AVHDRVividColorTransformParams *params = &s->params[w]; - - params->minimum_maxrgb = (AVRational){get_bits(gb, 12), maxrgb_den}; - params->average_maxrgb = (AVRational){get_bits(gb, 12), maxrgb_den}; - params->variance_maxrgb = (AVRational){get_bits(gb, 12), maxrgb_den}; - params->maximum_maxrgb = (AVRational){get_bits(gb, 12), maxrgb_den}; - } - - if (get_bits_left(gb) < 2 * s->num_windows) - return AVERROR_INVALIDDATA; - for (int w = 0; w < s->num_windows; w++) { - AVHDRVividColorTransformParams *params = &s->params[w]; - - params->tone_mapping_mode_flag = get_bits(gb, 1); - if (params->tone_mapping_mode_flag) { - if (get_bits_left(gb) < 1 ) - return AVERROR_INVALIDDATA; - params->tone_mapping_param_num = get_bits(gb, 1) + 1; - for (int i = 0; i < params->tone_mapping_param_num; i++) { - AVHDRVividColorToneMappingParams *tm_params = ¶ms->tm_params[i]; - - if (get_bits_left(gb) < 13) - return AVERROR_INVALIDDATA; - tm_params->targeted_system_display_maximum_luminance = (AVRational){get_bits(gb, 12), maximum_luminance_den}; - tm_params->base_enable_flag = get_bits(gb, 1); - if (tm_params->base_enable_flag) { - if (get_bits_left(gb) < (14 + 6 + 10 + 10 + 6 + 8 + 10)) - return AVERROR_INVALIDDATA; - tm_params->base_param_m_p = (AVRational){get_bits(gb, 14), base_param_m_p_den}; - tm_params->base_param_m_m = (AVRational){get_bits(gb, 6), base_param_m_m_den}; - tm_params->base_param_m_a = (AVRational){get_bits(gb, 10), base_param_m_a_den}; - tm_params->base_param_m_b = (AVRational){get_bits(gb, 10), base_param_m_b_den}; - tm_params->base_param_m_n = (AVRational){get_bits(gb, 6), base_param_m_n_den}; - tm_params->base_param_k1 = get_bits(gb, 2); - tm_params->base_param_k2 = get_bits(gb, 2); - tm_params->base_param_k3 = get_bits(gb, 4); - tm_params->base_param_Delta_enable_mode = get_bits(gb, 3); - tm_params->base_param_Delta = (AVRational){get_bits(gb, 7), base_param_Delta_den}; - } - if (get_bits_left(gb) < 1) - return AVERROR_INVALIDDATA; - tm_params->three_Spline_enable_flag = get_bits(gb, 1); - if (tm_params->three_Spline_enable_flag) { - AVHDRVivid3SplineParams *three_spline; - - if (get_bits_left(gb) < 1 + tm_params->three_Spline_num * (2 + 12 + 28 + 1)) - return 
AVERROR_INVALIDDATA; - tm_params->three_Spline_num = get_bits(gb, 1) + 1; - if (tm_params->three_Spline_num > FF_ARRAY_ELEMS(tm_params->three_spline)) - return AVERROR_INVALIDDATA; - for (int j = 0; j < tm_params->three_Spline_num; j++) { - three_spline = &tm_params->three_spline[j]; - three_spline->th_mode = get_bits(gb, 2); - if (three_spline->th_mode == 0 || three_spline->th_mode == 2) { - if (get_bits_left(gb) < 8) - return AVERROR_INVALIDDATA; - three_spline->th_enable_mb = (AVRational){get_bits(gb, 8), 255}; - } - three_spline->th_enable = (AVRational){get_bits(gb, 12), 4095}; - three_spline->th_delta1 = (AVRational){get_bits(gb, 10), 1023}; - three_spline->th_delta2 = (AVRational){get_bits(gb, 10), 1023}; - three_spline->enable_strength = (AVRational){get_bits(gb, 8), 255}; - } -#if FF_API_HDR_VIVID_THREE_SPLINE - three_spline = &tm_params->three_spline[0]; -FF_DISABLE_DEPRECATION_WARNINGS - tm_params->three_Spline_TH_mode = three_spline->th_mode; - tm_params->three_Spline_TH_enable_MB = three_spline->th_enable_mb; - tm_params->three_Spline_TH_enable = three_spline->th_enable; - tm_params->three_Spline_TH_Delta1 = three_spline->th_delta1; - tm_params->three_Spline_TH_Delta2 = three_spline->th_delta2; - tm_params->three_Spline_enable_Strength = three_spline->enable_strength; -FF_ENABLE_DEPRECATION_WARNINGS -#endif - } - } - } - - params->color_saturation_mapping_flag = get_bits(gb, 1); - if (params->color_saturation_mapping_flag) { - if (get_bits_left(gb) < 3 + params->color_saturation_num * 8) - return AVERROR_INVALIDDATA; - - params->color_saturation_num = get_bits(gb, 3); - for (int i = 0; i < params->color_saturation_num; i++) { - params->color_saturation_gain[i] = (AVRational){get_bits(gb, 8), color_saturation_gain_den}; - } - } - } - } - - return 0; -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/Dragons Rise of Berk APK - The Ultimate Guide to the Viking World.md b/spaces/congsaPfin/Manga-OCR/logs/Dragons Rise of Berk APK - The Ultimate Guide to the Viking World.md deleted file mode 100644 index 98c3b4d772359a3fa8535b1bd481241c6b91464c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Dragons Rise of Berk APK - The Ultimate Guide to the Viking World.md +++ /dev/null @@ -1,12 +0,0 @@ - -